Dataset schema (each record below follows this column order):

  text        string   length 12 to 1.05M
  repo_name   string   length 5 to 86
  path        string   length 4 to 191
  language    string   1 distinct value
  license     string   15 distinct values
  size        int32    12 to 1.05M
  keyword     list     1 to 23 items
  text_hash   string   length 64
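If these records need to be queried programmatically, the sketch below shows one way to do it with the Hugging Face datasets library. The dataset path is a placeholder (the real repository name is not given here), so treat this as an illustration of the schema rather than a working recipe.

# Minimal sketch, assuming the table above is published as a Hugging Face
# dataset. "user/python-keyword-corpus" is a placeholder path, not the real name.
from datasets import load_dataset

ds = load_dataset("user/python-keyword-corpus", split="train")

# Keep only records whose keyword list mentions "VTK".
vtk_rows = ds.filter(lambda row: "VTK" in row["keyword"])

for row in vtk_rows:
    print(row["repo_name"], row["path"], row["size"])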
""" The Tornado Framework By Ali Pesaranghader University of Ottawa, Ontario, Canada E-mail: apesaran -at- uottawa -dot- ca / alipsgh -at- gmail -dot- com """ class Color: """This class provides one with pre-chosen colors.""" Red = ["#b71c1c", "#d32f2f", "#f44336", "#e57373", "#ef9a9a"] Pink = ["#880E4F", "#C2185B", "#E91E63", "#F06292", "#F48FB1"] Purple = ["#4A148C", "#7B1FA2", "#9C27B0", "#BA68C8", "#CE93D8"] DeepPink = ["#311B92", "#512DA8", "#673AB7", "#9575CD", "#B39DDB"] Indigo = ["#1A237E", "#303F9F", "#3F51B5", "#7986CB", "#9FA8DA"] Blue = ["#0D47A1", "#1976D2", "#2196F3", "#64B5F6", "#90CAF9"] LightBlue = ["#01579B", "#0288D1", "#03A9F4", "#4FC3F7", "#81D4FA"] Cyan = ["#006064", "#0097A7", "#00BCD4", "#4DD0E1", "#80DEEA"] Teal = ["#004D40", "#00796B", "#009688", "#4DB6AC", "#80CBC4"] Green = ["#1B5E20", "#388E3C", "#4CAF50", "#81C784", "#A5D6A7"] LightGreen = ["#33691E", "#689F38", "#8BC34A", "#AED581", "#C5E1A5"] Lime = ["#827717", "#AFB42B", "#CDDC39", "#DCE775", "#E6EE9C"] Yellow = ["#F57F17", "#FBC02D", "#FFEB3B", "#FFF176", "#FFF59D"] Amber = ["#FF6F00", "#FFA000", "#FFC107", "#FFD54F", "#FFE082"] Orange = ["#E65100", "#F57C00", "#FF9800", "#FFB74D", "#FFCC80"] DeepOrange = ["#BF360C", "#E64A19", "#FF5722", "#FF8A65", "#FFAB91"] Brown = ["#3E2723", "#5D4037", "#795548", "#A1887F", "#BCAAA4"] BlueGrey = ["#263238", "#455A64", "#607D8B", "#90A4AE", "#B0BEC5"] C_36H = ["#000080", "#0000FF", "#1E90FF", "#87CEFA", "#00CED1", "#00FFFF", "#228B22", "#32CD32", "#9ACD32", "#66CDAA", "#00FF7F", "#7FFF00", "#8B4513", "#D2691E", "#DAA520", "#FFD700", "#FFFF00", "#F0E68C", "#FF4500", "#FF8C00", "#B22222", "#CD5C5C", "#FA8072", "#D2B48C", "#800080", "#FF1493", "#FF69B4", "#DA70D6", "#FFC0CB", "#DDA0DD", "#000000", "#696969", "#A9A9A9", "#A9A9A9", "#D3D3D3", "#C0C0C0"] # ==> 36 Colors! # "NAVY", "BLUE", "DODGERBLUE", "LIGHTSKYBLUE", "DARKTURQUOISE", "CYAN", # "FORESTGREEN", "LIMEGREEN", "YELLOWGREEN", "MEDIUMAQUAMARINE", "SPRINGGREEN", "CHARTREUSE", # "SADDLEBROWN", "CHOCOLATE", "GOLDENROD", "GOLD", "YELLOW", "KHAKI", # "ORANGERED", "DARKORANGE", "FIREBRICK", "INDIANRED", "SALMON", "TAN", # "PURPLE", "DEEPPINK", "HOTPINK", "ORCHID", "PINK", "PLUM", # "BLACK", "DIMGRAY", "DARKGRAY", "GRAY", "LIGHTGRAY", "SILVER" C_35H = ["#000080", "#0000FF", "#1E90FF", "#87CEFA", "#008B8B", "#00CED1", "#00FFFF", "#228B22", "#32CD32", "#9ACD32", "#3CB371", "#66CDAA", "#00FF7F", "#7FFF00", "#8B4513", "#D2691E", "#DAA520", "#FFD700", "#FFFF00", "#D2B48C", "#F0E68C", "#B22222", "#FF4500", "#FF8C00", "#CD5C5C", "#FA8072", "#E9967A", "#FFE4E1", "#000000", "#696969", "#A9A9A9", "#A9A9A9", "#D3D3D3", "#D8BFD8", "#C0C0C0"] # ==> 35 Colors! # "NAVY", "BLUE", "DODGERBLUE", "LIGHTSKYBLUE", "DARKCYAN", "DARKTURQUOISE", "CYAN", # "FORESTGREEN", "LIMEGREEN", "YELLOWGREEN", "MEDIUMSEAGREEN", "MEDIUMAQUAMARINE", "SPRINGGREEN", "CHARTREUSE", # "SADDLEBROWN", "CHOCOLATE", "GOLDENROD", "GOLD", "YELLOW", "TAN", "KHAKI", # "FIREBRICK", "ORANGERED", "DARKORANGE", "INDIANRED", "SALMON", "darksalmon", "MISTYROSE", # "PURPLE", "DEEPPINK", "HOTPINK", "ORCHID", "PINK", "THISTLE", "PLUM" C_30H = ["#000080", "#0000FF", "#1E90FF", "#87CEFA", "#00CED1", "#00FFFF", "#228B22", "#32CD32", "#9ACD32", "#66CDAA", "#00FF7F", "#7FFF00", "#8B4513", "#D2691E", "#DAA520", "#FFD700", "#FFFF00", "#F0E68C", "#FF4500", "#FF8C00", "#B22222", "#CD5C5C", "#FA8072", "#D2B48C", "#800080", "#FF1493", "#FF69B4", "#DA70D6", "#FFC0CB", "#DDA0DD"] # ==> 30 Colors! 
# "NAVY", "BLUE", "DODGERBLUE", "LIGHTSKYBLUE", "DARKTURQUOISE", "CYAN", # "FORESTGREEN", "LIMEGREEN", "YELLOWGREEN", "MEDIUMAQUAMARINE", "SPRINGGREEN", "CHARTREUSE", # "SADDLEBROWN", "CHOCOLATE", "GOLDENROD", "GOLD", "YELLOW", "KHAKI", # "ORANGERED", "DARKORANGE", "FIREBRICK", "INDIANRED", "SALMON", "TAN", # "PURPLE", "DEEPPINK", "HOTPINK", "ORCHID", "PINK", "PLUM" C_24H = ["#000080", "#0000FF", "#1E90FF", "#87CEFA", "#00CED1", "#00FFFF", "#228B22", "#32CD32", "#9ACD32", "#66CDAA", "#00FF7F", "#7FFF00", "#FF4500", "#FF8C00", "#B22222", "#CD5C5C", "#FA8072", "#D2B48C", "#800080", "#FF1493", "#FF69B4", "#DA70D6", "#FFC0CB", "#DDA0DD"] # ==> 24 Colors! # "NAVY", "BLUE", "DODGERBLUE", "LIGHTSKYBLUE", "DARKTURQUOISE", "CYAN", # "FORESTGREEN", "LIMEGREEN", "YELLOWGREEN", "MEDIUMAQUAMARINE", "SPRINGGREEN", "CHARTREUSE", # "ORANGERED", "DARKORANGE", "FIREBRICK", "INDIANRED", "SALMON", "TAN", # "PURPLE", "DEEPPINK", "HOTPINK", "ORCHID", "PINK", "PLUM" C_18H = ["#000080", "#0000FF", "#1E90FF", "#87CEFA", "#00CED1", "#00FFFF", "#228B22", "#32CD32", "#9ACD32", "#66CDAA", "#00FF7F", "#7FFF00", "#FF4500", "#FF8C00", "#B22222", "#CD5C5C", "#FA8072", "#D2B48C"] # ==> 18 Colors! # "NAVY", "BLUE", "DODGERBLUE", "LIGHTSKYBLUE", "DARKTURQUOISE", "CYAN", # "FORESTGREEN", "LIMEGREEN", "YELLOWGREEN", "MEDIUMAQUAMARINE", "SPRINGGREEN", "CHARTREUSE", # "ORANGERED", "DARKORANGE", "FIREBRICK", "INDIANRED", "SALMON", "TAN", C_12H = ["#000080", "#0000FF", "#1E90FF", "#228B22", "#32CD32", "#9ACD32", "#FF4500", "#FF8C00", "#B22222", "#800080", "#FF1493", "#FF69B4"] # ==> 12 Colors! # "NAVY", "BLUE", "DODGERBLUE", # "FORESTGREEN", "LIMEGREEN", "YELLOWGREEN", # "ORANGERED", "DARKORANGE", "FIREBRICK", # "PURPLE", "DEEPPINK", "HOTPINK" C_6H = ["#000080", "#1E90FF", "#228B22", "#FFD700", "#FF1493", "#FF4500"] # ==> 6 Colors! # "NAVY", "DODGERBLUE", # "FORESTGREEN", "GOLD", # "DEEPPINK", "ORANGERED", C_5H = ["#000080", "#1E90FF", "#32CD32", "#FF1493", "#B22222"] # ==> 5 Colors! # "FIREBRICK", "DEEPPINK", "LIMEGREEN", "DODGERBLUE", "NAVY" C_3H = ["#0000FF", "#FF0000", "#FFD700"] # ==> 3 Colors! # "BLUE", "RED", "GOLD" # ==> 2 Colors! C_2H = ["#0000FF", "#FF0000"]
alipsgh/tornado
graphic/hex_colors.py
Python
mit
6,342
[ "Amber" ]
362f34f7e1f852ffe578c6c5b1d6d66a5608e15019e54afffaa9045cc8aac2dc
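As a quick illustration (not part of the Tornado repository), the palettes in the record above drop straight into matplotlib color arguments; a minimal sketch, assuming the Color class is importable from graphic/hex_colors.py as listed in the path column:

# Minimal sketch; the import path is assumed from the record's path field.
import matplotlib.pyplot as plt
from graphic.hex_colors import Color

values = [5, 3, 4, 2, 1]
# One hex string per bar, taken from the pre-chosen five-shade blue palette.
plt.bar(range(len(values)), values, color=Color.Blue)
plt.show()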
""" BUGS: + varying the limits (eg exclude a marker) only has effect on the scalar_bar (color_bar), but not on the display of the vtk + there is no good coming back from volumetric slicing atm """ from shutil import copyfile import os import signal import sys import pygimli as pg from .drawer import drawMesh from .utils import pgMesh2pvMesh from PyQt5.QtCore import Qt from PyQt5.QtGui import QFont, QIcon from PyQt5.QtWidgets import ( QMainWindow, QFrame, QVBoxLayout, QComboBox, QPushButton, QFileDialog, QSplitter, QLabel, QAction, QDialog, QStatusBar ) from .gwidgets import ( GToolBar, GButton, GLineEdit, GComboBox, GSlider, GDoubleSpinBox, CMAPS ) pv = pg.optImport('pyvista', requiredFor="properly visualize 3D data") __all__ = ['Show3D'] class Show3D(QMainWindow): def __init__(self, application, **kwargs): """ pyGIMLi's GUI for pyvista's QtInteractor class. All kwargs are being forwarded to that. Note ---- Everything pyvista.Plotter would accept: https://docs.pyvista.org/plotting/plotting.html#plotter """ super(Show3D, self).__init__(None) # self.tmpMesh = tmpMesh # storage for the minima and maxima self.data = {} self._ignore = ['_Attribute', '_Marker', 'glob_min', 'glob_max'] self._app = application # setup the menubar self.setupMenu() self.setupWidget(**kwargs) # signals signal.signal(signal.SIGINT, self._signalHandler) self.acn_close.triggered.connect(self._signalHandler) self.acn_hkeys.triggered.connect(self.showHotKeys) def _signalHandler(self, sig, frame=None): """ Stop the GUI on CTRL-C, but not the script it was called from. from: https://stackoverflow.com/questions/1112343/how-do-i-capture-sigint-in-python """ sys.stderr.write('\r') self._app.quit() def setupMenu(self): """ Create the menubar on top of frame and provide actions. """ bar = self.menuBar() # quit the thing self.acn_close = QAction("&Quit", self) self.acn_close.setShortcut("Q") bar.addAction(self.acn_close) # about the viewer and help ghelp = bar.addMenu("Help") self.acn_hkeys = QAction("Hot Keys") ghelp.addAction(self.acn_hkeys) def showHotKeys(self): d = QDialog() textfield = QLabel( "q - Close pyGIMLi 3D Viewer\n" "v - Isometric camera view\n" "w - Switch all datasets to a wireframe representation\n" "s - Switch all datasets to a surface representation\n" "r - Reset the camera to view all datasets\n" "shift+click or middle-click - Pan the rendering scene\n" "left click - Rotate the rendering scene in 3D\n" "ctrl+click - Rotate the rendering scene in 2D (view-plane)\n" "mouse-wheel or right-click - Continuously zoom the rendering scene\n" ) textfield.setFont(QFont('Courier')) lyt = QVBoxLayout() btn_quit = QPushButton('quit') btn_quit.clicked.connect(d.done) lyt.addWidget(textfield) lyt.addWidget(btn_quit) lyt.setContentsMargins(2, 2, 2, 2) d.setLayout(lyt) d.setWindowTitle("Hot Keys") d.exec_() def setupWidget(self, **kwargs): # create the frame self.frame = QFrame() # add the pv interactor object self.plotter = pv.QtInteractor(**kwargs) vlayout = QVBoxLayout() vlayout.setContentsMargins(0, 0, 0, 0) self.statusbar = QStatusBar() self.setStatusBar(self.statusbar) self.toolbar = GToolBar() splitter = QSplitter(Qt.Horizontal) splitter.addWidget(self.toolbar) splitter.addWidget(self.plotter.interactor) splitter.setStretchFactor(0, 2) splitter.setStretchFactor(1, 5) vlayout.addWidget(splitter) self.frame.setLayout(vlayout) self.setCentralWidget(self.frame) self.setWindowTitle("pyGIMLi 3D Viewer") # set the icon for the window ipath = os.path.dirname(__file__) icon = os.path.join(ipath, 'favicon.ico') 
self.setWindowIcon(QIcon(icon)) def wait(self): """ overload """ self.show() self._app.exec() self._app.closeAllWindows() def addMesh(self, mesh, data=None, **kwargs): """ Add a mesh to the pyqt frame. Parameters ---------- mesh: pg.Mesh pyGIMLi created mesh. data: iterable Data belonging to the mesh. Note ---- **kwargs label: str A label for the given data. cmap: str The MPL colormap that should be used to display parameters. """ self.mesh = pgMesh2pvMesh(mesh, data, kwargs.pop('label', None)) cMap = kwargs.pop('cmap', 'viridis') if 'alpha' in kwargs: kwargs['opacity'] = kwargs.pop('alpha', 1) self.__kwargs = kwargs _, self._actor = drawMesh( self.plotter, self.mesh, cmap=cMap, returnActor=True, show_Edges=True, **self.__kwargs) # self._actor = self.plotter.add_mesh( # self.mesh, show_edges=True, cmap=cMap, **self.__kwargs) _ = self.plotter.show_bounds(all_edges=True, minor_ticks=True) self.plotter.reset_camera() # set the correctly chosen colormap if cMap.endswith('_r'): cMap = cMap[:-2] self.toolbar.btn_reverse.setChecked(2) # check if given cmap is in list and implement if it isn't if cMap not in CMAPS: self.toolbar.addExtraCMap(cMap) self.toolbar.cbbx_cmap.setCurrentText(cMap) self.allowMeshParameters() self._allowSignals() # set slicers to center after they're enabled _bounds = self.mesh.bounds self.toolbar.slice_x.setMinimum(_bounds[0]) self.toolbar.slice_x.setMaximum(_bounds[1]) self.toolbar.slice_x.setValue(0.5 * (_bounds[0] + _bounds[1])) self.toolbar.slice_y.setMinimum(_bounds[2]) self.toolbar.slice_y.setMaximum(_bounds[3]) self.toolbar.slice_y.setValue(0.5 * (_bounds[2] + _bounds[3])) self.toolbar.slice_z.setMinimum(_bounds[4]) self.toolbar.slice_z.setMaximum(_bounds[5]) self.toolbar.slice_z.setValue(0.5 * (_bounds[4] + _bounds[5])) # show loaded mesh self.plotter.reset_camera() def allowMeshParameters(self): """ Make data from the given mesh accessible via GUI. Note ---- This apparently needs to happen when using the gui since on call the cell_arrays will be emptied... """ # enable only when there is something to show self.toolbar.btn_apply.setEnabled(True) self.toolbar.btn_reset.setEnabled(True) self.toolbar.spbx_cmin.setEnabled(True) self.toolbar.spbx_cmax.setEnabled(True) _min = 1e99 _max = -1e99 for label, data in self.mesh.cell_arrays.items(): _mi = min(data) _ma = max(data) _min = _mi if _mi < _min else _mi _max = _ma if _ma > _max else _ma self.data[label] = { 'orig': {'min': _mi, 'max': _ma}, 'user': {'min': _mi, 'max': _ma}, 'data_orig': data, 'data_user': None } for label, data in self.mesh.point_arrays.items(): _mi = min(data) _ma = max(data) _min = _mi if _mi < _min else _mi _max = _ma if _ma > _max else _ma self.data[label] = { 'orig': {'min': _mi, 'max': _ma}, 'user': {'min': _mi, 'max': _ma}, 'data_orig': data, 'data_user': None } self.data['glob_min'] = _min self.data['glob_max'] = _max # supply the combobox with the names to choose from for display self.toolbar.cbbx_params.addItems(self.mesh.array_names) # get the current set parameter curr_param = self.toolbar.cbbx_params.currentText() # set the first cMin/cMax self.toolbar.spbx_cmin.setValue( self.data[curr_param]['orig']['min']) self.toolbar.spbx_cmax.setValue( self.data[curr_param]['orig']['max']) self.updateParameterView(curr_param) def updateParameterView(self, param=None): """ Change the view to given Parameter values. Parameters ---------- param: Current text of the just triggered QComboBox Note ---- May be overloaded. 
""" # remove the currently displayed mesh self.plotter.remove_actor(self._actor) mesh = self.mesh if param is not None and param not in CMAPS and not isinstance(param, int): # change to the desired parameter distribution self.mesh.set_active_scalars(param) # update the minima and maxima in the limit range # NOTE: if the global limit button is checked, just don't change # the extrema labels to enable the user to set ones own limits. if not self.toolbar.btn_global_limits.isChecked(): _min = self.data[param]['user']['min'] _max = self.data[param]['user']['max'] self.toolbar.spbx_cmin.setRange(_min, _max) self.toolbar.spbx_cmax.setRange(_min, _max) self.toolbar.spbx_cmin.setValue(_min) self.toolbar.spbx_cmax.setValue(_max) cMap = self.toolbar.cbbx_cmap.currentText() if self.toolbar.btn_reverse.isChecked(): cMap += '_r' if self.toolbar.btn_slice_plane.isChecked() and not self.toolbar.btn_slice_volume.isChecked(): x_val = self.toolbar.slice_x.value() y_val = self.toolbar.slice_y.value() z_val = self.toolbar.slice_z.value() # set slicer values into their boxes self.toolbar.la_xval.setText(str(round(x_val, 8))) self.toolbar.la_yval.setText(str(round(y_val, 8))) self.toolbar.la_zval.setText(str(round(z_val, 8))) # get the actual slices mesh = self.mesh.slice_orthogonal(x=x_val, y=y_val, z=z_val) if self.toolbar.chbx_threshold.isChecked(): # get the user defined limits cmin = self.toolbar.spbx_cmin.value() cmax = self.toolbar.spbx_cmax.value() mesh = self.mesh.threshold(value=[cmin, cmax]) # save the camera position # NOTE: this returns [camera position, focal point, and view up] self.camera_pos = self.plotter.camera_position[0] # add the modified one if self.toolbar.btn_slice_volume.isChecked() and not self.toolbar.btn_slice_plane.isChecked(): self._actor = self.plotter.add_mesh_clip_plane( mesh, cmap=cMap, show_edges=True, **self.__kwargs) else: # in case the plane widget was on.. turn it off # self.plotter.disable_plane_widget() self._actor = self.plotter.add_mesh( mesh, cmap=cMap, show_edges=True, **self.__kwargs) # update stuff in the toolbar self.updateScalarBar() # reset the camera position self.plotter.set_position(self.camera_pos) def updateScalarBar(self): """ When user set limits are made and finished/accepted the color bar needs to change. """ cmin = float(self.toolbar.spbx_cmin.value()) cmax = float(self.toolbar.spbx_cmax.value()) if cmax >= cmin: # get the active scalar/parameter that is displayed currently param = self.mesh.active_scalars_name # update the user extrema if not self.toolbar.btn_global_limits.isChecked(): self.data[param]['user']['min'] = cmin self.data[param]['user']['max'] = cmax # NOTE: has no effect on the displayed vtk # pg._d("RESET SCALAR BAR LIMITS") self.plotter.update_scalar_bar_range([cmin, cmax]) self.plotter.update() def toggleBbox(self): """ Toggle the visibility of the axis grid surrounding the model. """ checked = not self.toolbar.btn_bbox.isChecked() if not checked: self.plotter.remove_bounds_axes() self.plotter.remove_bounding_box() else: _ = self.plotter.show_bounds( all_edges=True, minor_ticks=True, show_xaxis=True, show_yaxis=True, show_zaxis=True, show_xlabels=True, show_ylabels=True, show_zlabels=True ) self.plotter.update() def takeScreenShot(self): """ Save the scene as image. 
Todo ---- + might come in handy to open a dialog where one can choose between black/white background and white/black axis grid and so on """ fname = QFileDialog.getSaveFileName( self, 'Open File', None, "Image files (*.jpg *.png)" )[0] if fname: if not len(fname.split('.')) == 2: fname += '.png' self.plotter.screenshot(fname) def exportMesh(self): """ Save the displayed data as VTK. """ f = QFileDialog.getSaveFileName( self, 'Export VTK', None, "VTK file (*.vtk)" )[0] if f: f = f + '.vtk' if not f.lower().endswith('.vtk') else f copyfile(self.tmpMesh, f) def resetExtrema(self, _btn=False, fromGlobal=False): """ Reset user chosen values to the original ones. Parameters ---------- _btn: bool [False] Catch the default that comes with the button signal. fromGlobal: bool [False] Flag for condition.. is set when resetting user/global limits. """ # get the active scalar/parameter that is displayed currently if fromGlobal is not False: param = fromGlobal else: param = self.mesh.active_scalars_name self.data[param]['user']['min'] = self.data[param]['orig']['min'] self.data[param]['user']['max'] = self.data[param]['orig']['max'] if not fromGlobal: # display correctly self.updateParameterView(param) def setGlobalLimits(self): """ Manipulate the user limits of the dictionary storing all data. """ if self.toolbar.btn_global_limits.isChecked(): _min = self.data['glob_min'] _max = self.data['glob_max'] self.toolbar.spbx_cmin.setRange(_min, _max) self.toolbar.spbx_cmax.setRange(_min, _max) self.toolbar.spbx_cmin.setValue(_min) self.toolbar.spbx_cmax.setValue(_max) else: for label in self.data.keys(): if label in self._ignore: continue self.resetExtrema(label) self.updateParameterView() def _enableSlicers(self): if self.toolbar.btn_slice_plane.isChecked(): self.toolbar.slice_x.setEnabled(True) self.toolbar.slice_y.setEnabled(True) self.toolbar.slice_z.setEnabled(True) else: self.toolbar.slice_x.setEnabled(False) self.toolbar.slice_y.setEnabled(False) self.toolbar.slice_z.setEnabled(False) self.updateParameterView() def _allowSignals(self): # connect signals self.toolbar.cbbx_params.currentTextChanged.connect( self.updateParameterView) self.toolbar.cbbx_cmap.currentTextChanged.connect( self.updateParameterView) self.toolbar.btn_reverse.clicked.connect(self.updateParameterView) self.toolbar.btn_bbox.pressed.connect(self.toggleBbox) self.toolbar.btn_global_limits.clicked.connect(self.setGlobalLimits) self.toolbar.btn_screenshot.clicked.connect(self.takeScreenShot) self.toolbar.btn_exportVTK.clicked.connect(self.exportMesh) self.toolbar.chbx_threshold.clicked.connect(self.updateParameterView) self.toolbar.chbx_threshold.clicked.connect(self._checkStatusThreshold) self.toolbar.btn_apply.clicked.connect(self.updateParameterView) self.toolbar.btn_reset.clicked.connect(self.resetExtrema) self.toolbar.btn_slice_plane.clicked.connect(self._checkStatusPlaneSlice) self.toolbar.btn_slice_volume.clicked.connect(self._checkStatusVolumeSlice) self.toolbar.slice_x.sliderReleased.connect(self.updateParameterView) self.toolbar.slice_y.sliderReleased.connect(self.updateParameterView) self.toolbar.slice_z.sliderReleased.connect(self.updateParameterView) def _checkStatusPlaneSlice(self): if self.toolbar.btn_slice_plane.isChecked(): self.toolbar.btn_slice_volume.setChecked(False) self.toolbar.chbx_threshold.setChecked(False) self._enableSlicers() def _checkStatusVolumeSlice(self): if self.toolbar.btn_slice_volume.isChecked(): self.toolbar.btn_slice_plane.setChecked(False) self.toolbar.chbx_threshold.setChecked(False) 
self._enableSlicers() def _checkStatusThreshold(self): """ Since its either threshold or slice, just disable the other. """ if self.toolbar.chbx_threshold.isChecked(): self.toolbar.btn_slice_plane.setChecked(False) self.toolbar.btn_slice_volume.setChecked(False) if __name__ == '__main__': pass
gimli-org/gimli
pygimli/viewer/pv/show3d.py
Python
apache-2.0
17,965
[ "VTK" ]
810b6e8f44c26c172ba2f9882519b8704d8819d6a32942f1b97063a5baef7e90
# # This program is free software you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation either version 3 of the License, or # (at your option) any later version. # # Written (W) 2013 Roman Votyakov # from pylab import * from numpy import * from itertools import * def generate_toy_data(n_train=100, mean_a=asarray([0, 0]), std_dev_a=1.0, mean_b=3, std_dev_b=0.5): # positive examples are distributed normally X1 = (random.randn(n_train, 2)*std_dev_a+mean_a).T # negative examples have a "ring"-like form r = random.randn(n_train)*std_dev_b+mean_b angle = random.randn(n_train)*2*pi X2 = array([r*cos(angle)+mean_a[0], r*sin(angle)+mean_a[1]]) # stack positive and negative examples in a single array X_train = hstack((X1,X2)) # label positive examples with +1, negative with -1 y_train = zeros(n_train*2) y_train[:n_train] = 1 y_train[n_train:] = -1 return [X_train, y_train] def gaussian_process_binary_classification_laplace(X_train, y_train, n_test=50): # import all necessary modules from Shogun (some of them require Eigen3) try: from modshogun import RealFeatures, BinaryLabels, GaussianKernel, \ LogitLikelihood, ProbitLikelihood, ZeroMean, LaplacianInferenceMethod, \ EPInferenceMethod, GaussianProcessClassification except ImportError: print('Eigen3 needed for Gaussian Processes') return # convert training data into Shogun representation train_features = RealFeatures(X_train) train_labels = BinaryLabels(y_train) # generate all pairs in 2d range of testing data x1 = linspace(X_train[0,:].min()-1, X_train[0,:].max()+1, n_test) x2 = linspace(X_train[1,:].min()-1, X_train[1,:].max()+1, n_test) X_test = asarray(list(product(x1, x2))).T # convert testing features into Shogun representation test_features = RealFeatures(X_test) # create Gaussian kernel with width = 2.0 kernel = GaussianKernel(10, 2.0) # create zero mean function mean = ZeroMean() # you can easily switch between probit and logit likelihood models # by uncommenting/commenting the following lines: # create probit likelihood model # lik = ProbitLikelihood() # create logit likelihood model lik = LogitLikelihood() # you can easily switch between Laplace and EP approximation by # uncommenting/commenting the following lines: # specify Laplace approximation inference method # inf = LaplacianInferenceMethod(kernel, train_features, mean, train_labels, lik) # specify EP approximation inference method inf = EPInferenceMethod(kernel, train_features, mean, train_labels, lik) # create and train GP classifier, which uses Laplace approximation gp = GaussianProcessClassification(inf) gp.train() # get probabilities p(y*=1|x*) for each testing feature x* p_test = gp.get_probabilities(test_features) # create figure figure() title('Training examples, predictive probability and decision boundary') # plot training data plot(X_train[0, argwhere(y_train == 1)], X_train[1, argwhere(y_train == 1)], 'ro') plot(X_train[0, argwhere(y_train == -1)], X_train[1, argwhere(y_train == -1)], 'bo') # plot decision boundary contour(x1, x2, reshape(p_test, (n_test, n_test)), levels=[0.5], colors=('black')) # plot probabilities pcolor(x1, x2, reshape(p_test, (n_test, n_test))) # show color bar colorbar() # show figure show() if __name__=='__main__': [X_train, y_train] = generate_toy_data() gaussian_process_binary_classification_laplace(X_train, y_train)
abhiatgithub/shogun-toolbox
examples/undocumented/python_modular/graphical/classifier_gaussian_process_binary_classification.py
Python
gpl-3.0
3,731
[ "Gaussian" ]
18198dfc76a7cc197edf84f41501d08a33370e966d338ada2adc6cdb9b5aea22
# -*- coding: utf-8 -*- """ Simple Interface to read RAW/MHD files e.g. created by elastix Todo: read subsets efficiently Example: >>> import os, numpy >>> from ClearMap.Settings import ClearMapPath >>> import ClearMap.IO.RAW as raw >>> filename = os.path.join(ClearMapPath, 'Test/Data/Raw/test.mhd') >>> raw.dataSize(filename); (20, 50, 10) """ #:copyright: Copyright 2015 by Christoph Kirst, The Rockefeller University, New York City #:license: GNU, see LICENSE.txt for details. import os import numpy import vtk from vtk.util.numpy_support import vtk_to_numpy import ClearMap.IO as io def dataSize(filename, **args): """Read data size from raw/mhd image Arguments: filename (str): imaris file name x,y,z (tuple or all): range specifications Returns: int: raw image data size """ imr = vtk.vtkMetaImageReader() imr.SetFileName(filename); imr.Update() im = imr.GetOutput() dims = im.GetDimensions(); #dims = list(dims); #dims[0:2] = [dims[1], dims[0]]; #dims = tuple(dims); return io.dataSizeFromDataRange(dims, **args); def dataZSize(filename, z = all, **args): """Read z data size from raw/mhd image Arguments: filename (str): imaris file name z (tuple or all): range specification Returns: int: raw image z data size """ imr = vtk.vtkMetaImageReader() imr.SetFileName(filename); imr.Update() im = imr.GetOutput() dims = im.GetDimensions() if len(dims) > 2: return io.toDataSize(dims[2], r = z); else: return None; def readData(filename, x = all, y = all, z = all): """Read data from raw/mhd image Arguments: filename (str): file name as regular expression x,y,z (tuple): data range specifications Returns: array: image data """ imr = vtk.vtkMetaImageReader() imr.SetFileName(filename); imr.Update() im = imr.GetOutput() dims = im.GetDimensions() print dims sc = im.GetPointData().GetScalars() img = vtk_to_numpy(sc) #print img.shape dims = list(dims); dims[0:3] = [dims[2], dims[1], dims[0]]; imgs = list(img.shape); if len(imgs) > 1: imgs.pop(0); dims = dims + imgs; img = img.reshape(dims) #img = img.transpose([1,2,0]); tp = [2,1,0]; tp = tp + [i for i in range(3, len(dims))]; img = img.transpose(tp); return io.dataToRange(img, x = x, y = y, z = z); def writeHeader(filename, meta_dict): """Write raw header mhd file Arguments: filename (str): file name of header meta_dict (dict): dictionary of meta data Returns: str: header file name """ header = '' # do not use tags = meta_dict.keys() because the order of tags matters tags = ['ObjectType','NDims','BinaryData', 'BinaryDataByteOrderMSB','CompressedData','CompressedDataSize', 'TransformMatrix','Offset','CenterOfRotation', 'AnatomicalOrientation', 'ElementSpacing', 'DimSize', 'ElementType', 'ElementDataFile', 'Comment','SeriesDescription','AcquisitionDate','AcquisitionTime','StudyDate','StudyTime'] for tag in tags: if tag in meta_dict.keys(): header += '%s = %s\n'%(tag,meta_dict[tag]) f = open(filename,'w') f.write(header) f.close() return filename; def writeRawData(filename, data): """Write the data into a raw format file. Arguments: filename (str): file name as regular expression data (array): data to write to raw file Returns: str: file name of raw file """ rawfile = open(filename,'wb'); d = len(data.shape); if d <= 2: #data.tofile(rawfile); data.transpose([1,0]).tofile(rawfile); elif d == 3: #data.transpose([2,0,1]).tofile(rawfile); data.transpose([2,1,0]).tofile(rawfile); elif d== 4: #data.transpose([3,2,0,1]).tofile(rawfile); data.transpose([3,2,1,0]).tofile(rawfile); else: raise RuntimeError('writeRawData: image dimension %d not supported!' 
% d); rawfile.close(); return filename; def writeData(filename, data, **args): """ Write data into to raw/mhd file pair Arguments: filename (str): file name as regular expression data (array): data to write to raw file Returns: str: file name of mhd file """ fext = io.fileExtension(filename); if fext == "raw": fname = filename[:-3] + 'mhd'; else: fname = filename; assert(fname[-4:]=='.mhd') meta_dict = {} meta_dict['ObjectType'] = 'Image' meta_dict['BinaryData'] = 'True' meta_dict['BinaryDataByteOrderMSB'] = 'False' numpy_to_datatype = {numpy.dtype('int8') : "MET_CHAR", numpy.dtype('uint8') : "MET_UCHAR", numpy.dtype('int16') : "MET_SHORT", numpy.dtype('uint16') : "MET_USHORT", numpy.dtype('int32') : "MET_INT", numpy.dtype('uint32') : "MET_UINT", numpy.dtype('int64') : "MET_LONG", numpy.dtype('uint64') : "MET_ULONG", numpy.dtype('float32') : "MET_FLOAT", numpy.dtype('float64') : "MET_DOUBLE", } dtype = data.dtype; meta_dict['ElementType'] = numpy_to_datatype[dtype]; dsize = list(data.shape); #dsize[0:2] = [dsize[1],dsize[0]]; #fix arrays represented as (y,x,z) meta_dict['NDims'] = str(len(dsize)) meta_dict['DimSize'] = ' '.join([str(i) for i in dsize]) meta_dict['ElementDataFile'] = os.path.split(fname)[1].replace('.mhd','.raw') writeHeader(fname, meta_dict) pwd = os.path.split(fname)[0] if pwd: data_file = pwd +'/' + meta_dict['ElementDataFile'] else: data_file = meta_dict['ElementDataFile'] writeRawData(data_file, data) return fname; def copyData(source, sink): """Copy a raw/mhd file pair from source to sink Arguments: source (str): file name of source sink (str): file name of sink Returns: str: file name of the copy """ sourceExt = io.fileExtension(source); sinkExt = io.fileExtension(sink); sources = [source]; sinks = []; if sourceExt == 'raw': sources.append(source[:-3] + 'mhd'); if sinkExt == 'raw': sinks.append(sink); sinks.append(sink[:-3] + 'mhd'); elif sinkExt == 'mhd': sinks.append(sink[:-3] + 'raw'); sinks.append(sink); else: raise RuntimeError('copyData: sink extension %s not raw or mhd' % sinkExt); elif sourceExt == 'mhd': sources.append(source[:-3] + 'raw'); if sinkExt == 'raw': sinks.append(sink[:-3] + 'mhd'); sinks.append(sink); elif sinkExt == 'mhd': sinks.append(sink); sinks.append(sink[:-3] + 'raw'); else: raise RuntimeError('copyData: sink extension %s not raw or mhd' % sinkExt); for i in range(2): io.copyData(sources[i], sinks[i]); return sink; def test(): """Test RAW io module""" import ClearMap.IO.RAW as self reload(self) from ClearMap.Settings import ClearMapPath import os import numpy """Test RAW module""" basedir = ClearMapPath; fn = os.path.join(basedir, 'Test/Data/Raw/test.mhd') data = numpy.random.rand(20,50,10); data[5:15, 20:45, 2:9] = 0; #reload(self) print "writing raw image to: " + fn; self.writeData(fn, data); print "Loading raw image from: " + fn; img = self.readData(fn); print "Image size: " + str(img.shape) diff = img - data; print (diff.max(), diff.min()) #some uint type print "writing raw image to: " + fn; udata = data * 10; udata = udata.astype('uint16'); self.writeData(fn, udata); print "Loading raw image from: " + fn; img = self.readData(fn); print "Image size: " + str(img.shape) diff = img - udata; print (diff.max(), diff.min()) #dataSize print "dataSize is %s" % str(self.dataSize(fn)) print "dataZSize is %s" % str(self.dataZSize(fn)) if __name__ == "__main__": test(); #VTK / ITK versions: #def writeData(filename, data, **args): # # vtkarray = numpy_to_vtk(data); # #assert isinstance(v, vtk.vtkImageData) # writer = vtk.vtkMetaImageWriter(); # # 
fext = io.fileExtension(filename); # if fext == "raw": # fname = filename[:-3] + 'mhd'; # else: # fname = filename; # # writer.SetFileName(fname) # writer.SetInput(vtkarray) # writer.Write() # from medpy.io import save # save(data, filename); # return filename;
ChristophKirst/ClearMap
ClearMap/IO/RAW.py
Python
gpl-3.0
9,265
[ "VTK" ]
d6d9e5bc751afb2a0c6bbbc46938b6222ce693fc674442010389275957ecae89
import os from PyPtt import version # A setuptools based setup module. # See: # https://packaging.python.org/en/latest/distributing.html # https://github.com/pypa/sampleproject # Always prefer setuptools over distlib_utils from setuptools import setup long_description = '''PyPtt (PTT Library) 是一套 Pure Python PTT API。具備大部分常用功能,無論推文、發文、爬蟲、寄信、發 P 幣、丟水球或者追蹤帳號,你都可以在這裡找到完整的使用範例 可自由選擇 WebSocket 或者 Telnet 來連線,並支援繁體中文與英文顯示 詳情請洽 https://github.com/PttCodingMan/PyPtt ''' # Arguments marked as "Required" below must be included for upload to PyPI. # Fields marked as "Optional" may be commented out. setup( # This is the name of your project. The first time you publish this # package, this name will be registered for you. It will determine how # users can install this project, e.g.: # # $ pip install sampleproject # # And where it will live on PyPI: https://pypi.org/project/sampleproject/ # # There are some restrictions on what makes a valid project name # specification here: # https://packaging.python.org/specifications/core-metadata/#name name='PyPtt', # Required # Versions should comply with PEP 440: # https://www.python.org/dev/peps/pep-0440/ # # For a discussion on single-sourcing the version across setup.py and the # project code, see # https://packaging.python.org/en/latest/single_source_version.html version=version.V, # Required # This is a one-line description or tagline of what your project does. This # corresponds to the "Summary" metadata field: # https://packaging.python.org/specifications/core-metadata/#summary description='PyPtt\ngithub: https://github.com/PttCodingMan/PyPtt', # Required # This is an optional longer description of your project that represents # the body of text which users will see when they visit PyPI. # # Often, this is the same as your README, so you can just read it in from # that file directly (as we have already done above) # # This field corresponds to the "Description" metadata field: # https://packaging.python.org/specifications/core-metadata/#description-optional long_description=long_description, # Optional # This should be a valid link to your project's main homepage. # # This field corresponds to the "Home-Page" metadata field: # https://packaging.python.org/specifications/core-metadata/#home-page-optional url='https://github.com/PttCodingMan/PyPtt', # Optional # This should be your name or the name of the organization which owns the # project. author='CodingMan', # Optional # This should be a valid email address corresponding to the author listed # above. author_email='pttcodingman@gmail.com', # Optional # Classifiers help users find your project by categorizing it. # # For a list of valid classifiers, see # https://pypi.org/classifiers/ classifiers=[ # Optional # How mature is this project? 
Common values are # 3 - Alpha # 4 - Beta # 5 - Production/Stable 'Development Status :: 4 - Beta', 'Operating System :: OS Independent', # Indicate who your project is intended for 'Intended Audience :: Developers', 'Topic :: Communications :: BBS', 'Topic :: Software Development :: Libraries :: Python Modules', 'Topic :: Internet', 'Topic :: Terminals :: Telnet', # Pick your license as you wish 'License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', 'Natural Language :: Chinese (Traditional)', 'Natural Language :: English', ], # This field adds keywords for your project which will appear on the # project page. What does your project relate to? # # Note that this is a string of words separated by whitespace, not a list. keywords=['PTT', 'crawler', 'bot', 'library', 'telnet', 'websocket'], # Optional python_requires='>=3.6', # You can just specify package directories manually here if your project is # simple. Or you can use find_packages(). # # Alternatively, if you just want to distribute a single Python file, use # the `py_modules` argument instead as follows, which will expect a file # called `my_module.py` to exist: # # py_modules=["PTTLibrary.py"], # packages=['PyPtt'], # Required # package_dir={'PTTLibrary': 'PTT'}, # This field lists other packages that your project depends on to run. # Any package you put here will be installed by pip when your project is # installed, so they must be valid existing projects. # # For an analysis of "install_requires" vs pip's requirements files see: # https://packaging.python.org/en/latest/requirements.html install_requires=[ 'progressbar2', 'websockets', 'uao', 'SingleLog' ], # List additional groups of dependencies here (e.g. development # dependencies). Users will be able to install these using the "extras" # syntax, for example:0 # # $ pip install sampleproject[dev] # # Similar to `install_requires` above, these must be valid existing # projects. # extras_require={ # Optional # 'dev': ['check-manifest'], # 'test': ['coverage'], # }, # If there are data files included in your packages that need to be # installed, specify them here. # # If using Python 2.6 or earlier, then these have to be included in # MANIFEST.in as well. # package_data={ # Optional # 'sample': ['package_data.dat'], # }, # Although 'package_data' is the preferred approach, in some case you may # need to place data files outside of your packages. See: # http://docs.python.org/3.4/distlib_utils/setupscript.html#installing-additional-files # # In this case, 'data_file' will be installed into '<sys.prefix>/my_data' # data_files=[('my_data', ['data/data_file'])], # Optional # To provide executable scripts, use entry points in preference to the # "scripts" keyword. Entry points provide cross-platform support and allow # `pip` to create the appropriate form of executable for the target # platform. # # For example, the following would provide a command called `sample` which # executes the function `main` from this package when invoked: # entry_points={ # Optional # 'console_scripts': [ # 'PTTLibrary=PTT:main', # ], # }, )
Truth0906/PTTCrawlerLibrary
setup.py
Python
lgpl-3.0
6,917
[ "VisIt" ]
8ede51bab5e2a8b75c27b44128676407b042364be40351e82f52a23e7fe545e6
import py import pytest import execnet from _pytest.pytester import HookRecorder from xdist import slavemanage, newhooks from xdist.slavemanage import HostRSync, NodeManager pytest_plugins = "pytester" def pytest_funcarg__hookrecorder(request, config): hookrecorder = HookRecorder(config.pluginmanager) if hasattr(hookrecorder, "start_recording"): hookrecorder.start_recording(newhooks) request.addfinalizer(hookrecorder.finish_recording) return hookrecorder def pytest_funcarg__config(testdir): return testdir.parseconfig() def pytest_funcarg__mysetup(tmpdir): class mysetup: source = tmpdir.mkdir("source") dest = tmpdir.mkdir("dest") return mysetup() @pytest.fixture def slavecontroller(monkeypatch): class MockController(object): def __init__(self, *args): pass def setup(self): pass monkeypatch.setattr(slavemanage, 'SlaveController', MockController) return MockController class TestNodeManagerPopen: def test_popen_no_default_chdir(self, config): gm = NodeManager(config, ["popen"]) assert gm.specs[0].chdir is None def test_default_chdir(self, config): l = ["ssh=noco", "socket=xyz"] for spec in NodeManager(config, l).specs: assert spec.chdir == "pyexecnetcache" for spec in NodeManager(config, l, defaultchdir="abc").specs: assert spec.chdir == "abc" def test_popen_makegateway_events(self, config, hookrecorder, slavecontroller): hm = NodeManager(config, ["popen"] * 2) hm.setup_nodes(None) call = hookrecorder.popcall("pytest_xdist_setupnodes") assert len(call.specs) == 2 call = hookrecorder.popcall("pytest_xdist_newgateway") assert call.gateway.spec == execnet.XSpec("popen") assert call.gateway.id == "gw0" call = hookrecorder.popcall("pytest_xdist_newgateway") assert call.gateway.id == "gw1" assert len(hm.group) == 2 hm.teardown_nodes() assert not len(hm.group) def test_popens_rsync(self, config, mysetup, slavecontroller): source = mysetup.source hm = NodeManager(config, ["popen"] * 2) hm.setup_nodes(None) assert len(hm.group) == 2 for gw in hm.group: class pseudoexec: args = [] def __init__(self, *args): self.args.extend(args) def waitclose(self): pass gw.remote_exec = pseudoexec l = [] for gw in hm.group: hm.rsync(gw, source, notify=lambda *args: l.append(args)) assert not l hm.teardown_nodes() assert not len(hm.group) assert "sys.path.insert" in gw.remote_exec.args[0] def test_rsync_popen_with_path(self, config, mysetup, slavecontroller): source, dest = mysetup.source, mysetup.dest hm = NodeManager(config, ["popen//chdir=%s" % dest] * 1) hm.setup_nodes(None) source.ensure("dir1", "dir2", "hello") l = [] for gw in hm.group: hm.rsync(gw, source, notify=lambda *args: l.append(args)) assert len(l) == 1 assert l[0] == ("rsyncrootready", hm.group['gw0'].spec, source) hm.teardown_nodes() dest = dest.join(source.basename) assert dest.join("dir1").check() assert dest.join("dir1", "dir2").check() assert dest.join("dir1", "dir2", 'hello').check() def test_rsync_same_popen_twice(self, config, mysetup, hookrecorder, slavecontroller): source, dest = mysetup.source, mysetup.dest hm = NodeManager(config, ["popen//chdir=%s" % dest] * 2) hm.roots = [] hm.setup_nodes(None) source.ensure("dir1", "dir2", "hello") gw = hm.group[0] hm.rsync(gw, source) call = hookrecorder.popcall("pytest_xdist_rsyncstart") assert call.source == source assert len(call.gateways) == 1 assert call.gateways[0] in hm.group call = hookrecorder.popcall("pytest_xdist_rsyncfinish") class TestHRSync: def test_hrsync_filter(self, mysetup): source, _ = mysetup.source, mysetup.dest # noqa source.ensure("dir", "file.txt") source.ensure(".svn", 
"entries") source.ensure(".somedotfile", "moreentries") source.ensure("somedir", "editfile~") syncer = HostRSync(source, ignores=NodeManager.DEFAULT_IGNORES) l = list(source.visit(rec=syncer.filter, fil=syncer.filter)) assert len(l) == 3 basenames = [x.basename for x in l] assert 'dir' in basenames assert 'file.txt' in basenames assert 'somedir' in basenames def test_hrsync_one_host(self, mysetup): source, dest = mysetup.source, mysetup.dest gw = execnet.makegateway("popen//chdir=%s" % dest) finished = [] rsync = HostRSync(source) rsync.add_target_host(gw, finished=lambda: finished.append(1)) source.join("hello.py").write("world") rsync.send() gw.exit() assert dest.join(source.basename, "hello.py").check() assert len(finished) == 1 class TestNodeManager: @py.test.mark.xfail(run=False) def test_rsync_roots_no_roots(self, testdir, mysetup): mysetup.source.ensure("dir1", "file1").write("hello") config = testdir.parseconfig(mysetup.source) nodemanager = NodeManager(config, ["popen//chdir=%s" % mysetup.dest]) #assert nodemanager.config.topdir == source == config.topdir nodemanager.makegateways() nodemanager.rsync_roots() p, = nodemanager.gwmanager.multi_exec( "import os ; channel.send(os.getcwd())").receive_each() p = py.path.local(p) py.builtin.print_("remote curdir", p) assert p == mysetup.dest.join(config.topdir.basename) assert p.join("dir1").check() assert p.join("dir1", "file1").check() def test_popen_rsync_subdir(self, testdir, mysetup, slavecontroller): source, dest = mysetup.source, mysetup.dest dir1 = mysetup.source.mkdir("dir1") dir2 = dir1.mkdir("dir2") dir2.ensure("hello") for rsyncroot in (dir1, source): dest.remove() nodemanager = NodeManager(testdir.parseconfig( "--tx", "popen//chdir=%s" % dest, "--rsyncdir", rsyncroot, source, )) nodemanager.setup_nodes(None) # calls .rsync_roots() if rsyncroot == source: dest = dest.join("source") assert dest.join("dir1").check() assert dest.join("dir1", "dir2").check() assert dest.join("dir1", "dir2", 'hello').check() nodemanager.teardown_nodes() def test_init_rsync_roots(self, testdir, mysetup, slavecontroller): source, dest = mysetup.source, mysetup.dest dir2 = source.ensure("dir1", "dir2", dir=1) source.ensure("dir1", "somefile", dir=1) dir2.ensure("hello") source.ensure("bogusdir", "file") source.join("tox.ini").write(py.std.textwrap.dedent(""" [pytest] rsyncdirs=dir1/dir2 """)) config = testdir.parseconfig(source) nodemanager = NodeManager(config, ["popen//chdir=%s" % dest]) nodemanager.setup_nodes(None) # calls .rsync_roots() assert dest.join("dir2").check() assert not dest.join("dir1").check() assert not dest.join("bogus").check() def test_rsyncignore(self, testdir, mysetup, slavecontroller): source, dest = mysetup.source, mysetup.dest dir2 = source.ensure("dir1", "dir2", dir=1) source.ensure("dir5", "dir6", "bogus") source.ensure("dir5", "file") dir2.ensure("hello") source.ensure("foo", "bar") source.ensure("bar", "foo") source.join("tox.ini").write(py.std.textwrap.dedent(""" [pytest] rsyncdirs = dir1 dir5 rsyncignore = dir1/dir2 dir5/dir6 foo* """)) config = testdir.parseconfig(source) config.option.rsyncignore = ['bar'] nodemanager = NodeManager(config, ["popen//chdir=%s" % dest]) nodemanager.setup_nodes(None) # calls .rsync_roots() assert dest.join("dir1").check() assert not dest.join("dir1", "dir2").check() assert dest.join("dir5", "file").check() assert not dest.join("dir6").check() assert not dest.join('foo').check() assert not dest.join('bar').check() def test_optimise_popen(self, testdir, mysetup, slavecontroller): source = 
mysetup.source specs = ["popen"] * 3 source.join("conftest.py").write("rsyncdirs = ['a']") source.ensure('a', dir=1) config = testdir.parseconfig(source) nodemanager = NodeManager(config, specs) nodemanager.setup_nodes(None) # calls .rysnc_roots() for gwspec in nodemanager.specs: assert gwspec._samefilesystem() assert not gwspec.chdir def test_ssh_setup_nodes(self, specssh, testdir): testdir.makepyfile(__init__="", test_x=""" def test_one(): pass """) reprec = testdir.inline_run("-d", "--rsyncdir=%s" % testdir.tmpdir, "--tx", specssh, testdir.tmpdir) rep, = reprec.getreports("pytest_runtest_logreport") assert rep.passed
ohmu/pytest-xdist
testing/test_slavemanage.py
Python
mit
9,551
[ "VisIt" ]
07d69012cf4ae622bbd60f75b6ac19b199ceb19fadfb3d2c62204b08e6d3b2b4
#!/usr/bin/env python
##
## @file    translateMath.py
## @brief   Translates infix formulas into MathML and vice-versa
## @author  Sarah Keating
## @author  Ben Bornstein
##
##
## This file is part of libSBML.  Please visit http://sbml.org for more
## information about SBML, and the latest version of libSBML.
##

import sys
import time
import os
import os.path
from libsbml import *

#
# Translates the given infix formula into MathML.
#
# @return the MathML as a string.  The caller owns the memory and is
# responsible for freeing it.
#
def translateInfix(formula):
    math = parseFormula(formula);
    return writeMathMLToString(math);

#
# Translates the given MathML into an infix formula.  The MathML must
# contain no leading whitespace, but an XML header is optional.
#
# @return the infix formula as a string.  The caller owns the memory and
# is responsible for freeing it.
#
def translateMathML(xml):
    math = readMathMLFromString(xml);
    return formulaToString(math);

def main(args):
    """Reads formulas from stdin; an empty line triggers translation."""
    print("This program translates infix formulas into MathML and");
    print("vice-versa. Enter or return on an empty line triggers");
    print("translation. Ctrl-C quits");

    sb = ""
    try:
        while True:
            print("Enter infix formula or MathML expression (Ctrl-C to quit):");
            # Converted from the original Python 2 statement `print "> ",`
            # so the script also runs under Python 3.
            print("> ", end="");
            sys.stdout.flush()

            line = sys.stdin.readline()
            while line != None:
                trimmed = line.strip();
                length = len(trimmed);
                if (length > 0):
                    sb = sb + trimmed;
                else:
                    str = sb;
                    result = ""
                    if (str[0] == '<'):
                        result = translateMathML(str)
                    else:
                        result = translateInfix(str)
                    print("Result:\n\n" + result + "\n\n");
                    sb = "";
                    break;
                line = sys.stdin.readline()
    except:
        return 0;
    return 0;

if __name__ == '__main__':
    main(sys.argv)
dilawar/moose-full
dependencies/libsbml-5.9.0/examples/python/translateMath.py
Python
gpl-2.0
2,002
[ "VisIt" ]
6e59dcc1721f54869154fd809ffe26c803cf2d0d646a023b70c51de9cb955e7b
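For a non-interactive round trip with the same libsbml helpers the script above uses, a minimal sketch (assuming python-libsbml is installed):

# Minimal sketch using the libsbml helpers called in the script above.
from libsbml import parseFormula, writeMathMLToString, readMathMLFromString, formulaToString

mathml = writeMathMLToString(parseFormula("1 + 2 * x"))   # infix -> MathML
print(mathml)
print(formulaToString(readMathMLFromString(mathml)))      # MathML -> infix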
"""@package gmm_clustering loads data, runs the gaussian mixture model filter with clustering tacked on """ import numpy as np import math import matplotlib.pyplot as plt import sys import time import scipy.stats as stats # for chi-sqaured functions import scipy.linalg # for sqrtm() function import cluster_processing sys.path.append('../') import cp_dynamics sys.path.append('../../filters/python/lib') sys.path.append('../../filters/python/gmm') import gmm sys.path.append('../sim_data') import data_loader def eqom_gmm(x,t,u=None): return cp_dynamics.eqom_det(x,t) def jac_gmm(x,t,u=None): return cp_dynamics.eqom_det_jac(x,t) def process_influence(x,t,u=None): return cp_dynamics.eqom_det_Gk(x,t) ## default measurement function for the case with linear position measurement def measurement_gmm(x,t,u=None): return np.array([x[0]]) def measurement_jac_gmm(x,t,u=None): return np.array([[1.0, 0.0]]) def measurement_jac_uninformative(x,t,u=None): return np.array([[2.0*x[0], 0.0]]) ## measurement function for the case with measurement of position squared with linear measurement noise def measurement_uninformative(x,t,u=None): return np.array([x[0]*x[0]]) ## Driver for the clustering #@param[out] xml the maximum likelihood estimate based on the PDF #@param[out] pdf nSteps x Np numpy array; the value of the pdf at discrete points in pdfPts #@param[out] pdfPts nSteps x 2 x Np numpy array; the points at which the PDF is evaluated at each time. Also corresponds to the aposteriori means in the GMM #@param[out] alphai the weights associated with each mean in the GMM at each time in the output #@param[out] Pki nSteps x 2 x 2 x Np numpy array; the covariance assocaited with each mean def gmm_test(dt,tf,mux0,P0,YK,Qk,Rk,Nsu=20,flag_informative=True): global nameBit # add in this functionality so we can change the propagation function dependent on the nameBit ... 
may or may not be needed if not flag_informative: measure_argument = measurement_uninformative measure_jacobian = measurement_jac_uninformative else: measure_argument = measurement_enkf measure_jacobian = measurement_jac_gmm if nameBit == 1: # create EnKF object GMM = gmm.gmm(2,Nsu,Qk,Rk,eqom_gmm,jac_gmm,process_influence,measure_argument,measure_jacobian) elif nameBit == 2: # create EnKF object GMM = gmm.gmm(2,Nsu,Qk,Rk,eqom_gmm,jac_gmm,process_influence,measure_argument,measure_jacobian) elif nameBit == 3: # create EnKF object GMM = gmm.gmm(2,Nsu,Qk,Rk,eqom_gmm,jac_gmm,process_influence,measure_argument,measure_jacobian) nSteps = int(tf/dt)+1 ts = 0.0 #initialize EnKF GMM.init_monte(mux0,P0,ts) xml = np.zeros((nSteps,2)) pdf = np.zeros((nSteps,GMM.aki.shape[1])) pdfPts = np.zeros((nSteps,2,GMM.aki.shape[1])) alphai = np.zeros((nSteps,GMM.aki.shape[1])) Pki = np.zeros((nSteps,2,2,GMM.aki.shape[1])) tk = np.arange(0.0,tf,dt) t1 = time.time() fig = [] for k in range(0,nSteps): if k > 0: # get the new measurement ym = np.array([YK[k]]) ts = ts + dt # sync the ENKF, with continuous-time integration print("Propagate to t = %f" % (ts)) # propagate filter GMM.propagate_normal(dt) GMM.update(ym) # log alphai[k,:] = GMM.alphai.copy() xml[k,:] = GMM.get_max_likelihood() Pki[k,:,:,:] = GMM.Pki.copy() (pdfPts[k,:,:],pdf[k,:]) = GMM.get_pdf() if k > 0: GMM.resample() t2 = time.time() print("Elapsed time: %f sec" % (t2-t1)) return(xml,pdf,pdfPts,alphai,Pki) def main(): global nameBit ## number of particles to use Nsu = 100 names = ['sims_01_bifurcation_noninformative'] flag_informative = False for namecounter in range(len(names)): nameNow = names[namecounter] (tsim,XK,YK,mu0,P0,Ns,dt,tf) = data_loader.load_data(nameNow,'../sim_data/') ''' tsim = tsim[0:5] XK = XK[0:5,:] YK = YK[0:5,:] tf = tsim[4] ''' Ns = 1 nameBit = int(nameNow[5:7],2) # parse the name if nameBit == 1: # noise levels for the ENKF with white noise forcing Qk = np.array([[1.0]]) Rk = np.array([[0.1]]) if nameBit == 2: # noise levels for the UKF with cosine forcing Qk = np.array([[3.16/dt]]) Rk = np.array([[0.1]]) # number of steps in each simulation nSteps = len(tsim) nees_history = np.zeros((nSteps,Ns)) Nf_history = np.zeros((nSteps,Ns)) e_sims = np.zeros((Ns*nSteps,2)) for counter in range(Ns): xk = XK[:,(2*counter):(2*counter+2)] yk = YK[:,counter] #(Xf,Pf,Idx,Xp) = gmm_test(dt,tf,mu0,P0,yk,Qk,Rk,flag_informative) (Xf,pdf,pdfPts,alphai,Pki) = gmm_test(dt,tf,mu0,P0,yk,Qk,Rk,Nsu,flag_informative) print("gmm_clustering case %d/%d" % (counter+1,Ns)) if Ns == 1: fig = [] for k in range(nSteps): fig.append(plt.figure()) ax = fig[k].add_subplot(1,1,1,title="t = %f" % (tsim[k])) # the number of active means activeMeans = pdf.shape[1] for jk in range(activeMeans): mux = pdfPts[k,:,jk] ax.plot(pdfPts[k,0,jk],pdfPts[k,1,jk],'o') # plot the single-mean covariance ellipsoid # draw points on a unit circle thetap = np.linspace(0,2*math.pi,20) circlP = np.zeros((20,2)) circlP[:,0] = 3.0*np.cos(thetap) circlP[:,1] = 3.0*np.sin(thetap) # transform the points circlP through P^(1/2)*circlP + mu Phalf = np.real(scipy.linalg.sqrtm(Pki[k,:,:,jk])) ellipsP = np.zeros(circlP.shape) for kj in range(circlP.shape[0]): ellipsP[kj,:] = np.dot(Phalf,circlP[kj,:])+mux ax.plot(ellipsP[:,0],ellipsP[:,1],'--') # plot the truth state ax.plot(xk[k,0],xk[k,1],'ks') ax.grid() fig[k].show() raw_input("Return to quit") for k in range(nSteps): plt.close(fig[k]) ''' (e1,chi2,mx,Pk) = cluster_processing.singleSimErrors(Xf,Idx,xk,yk) nees_history[:,counter] = chi2.copy() 
mean_nees = np.sum(chi2)/float(nSteps) print(mean_nees) # mean NEES mse = np.sum(np.power(e1,2.0),axis=0)/float(nSteps) e_sims[(counter*nSteps):(counter*nSteps+nSteps),:] = e1.copy() print("MSE: %f,%f" % (mse[0],mse[1])) if Ns < 2: # plot the mean trajectories and error fig1 = plt.figure() ax = [] for k in range(4): if k < 2: nam = 'x' + str(k+1) else: nam = 'e' + str(k-1) ax.append(fig1.add_subplot(2,2,k+1,ylabel=nam)) if k < 2: ax[k].plot(tsim,xk[:,k],'b-') ax[k].plot(tsim,mx[:,k],'m--') else: ax[k].plot(tsim,e1[:,k-2]) ax[k].plot(tsim,3.0*np.sqrt(Pk[:,k-2,k-2]),'r--') ax[k].plot(tsim,-3.0*np.sqrt(Pk[:,k-2,k-2]),'r--') ax[k].grid() fig1.show() mse_tot = np.mean(np.power(e_sims,2.0),axis=0) print("mse_tot: %f,%f" % (mse_tot[0],mse_tot[1])) # get the mean NEES value versus simulation time across all sims nees_mean = np.sum(nees_history,axis=1)/Ns # get the mean number of particles in time Nf_mean = np.sum(Nf_history,axis=1)/Ns # get 95% confidence bounds for chi-sqaured... the df is the number of sims times the dimension of the state chiUpper = stats.chi2.ppf(.975,2.0*Ns)/float(Ns) chiLower = stats.chi2.ppf(.025,2.0*Ns)/float(Ns) # plot the mean NEES with the 95% confidence bounds fig2 = plt.figure(figsize=(6.0,3.37)) #figsize tuple is width, height tilt = "ENKF, Ts = %.2f, %d sims, " % (dt, Ns) if nameBit == 0: tilt = tilt + 'unforced' if nameBit == 1: #white-noise only tilt = tilt + 'white-noise forcing' if nameBit == 2: tilt = tilt + 'cosine forcing' if nameBit == 3: #white-noise and cosine forcing tilt = tilt + 'white-noise and cosine forcing' ax = fig2.add_subplot(111,ylabel='mean NEES',title=tilt) ax.plot(tsim,chiUpper*np.ones(nSteps),'r--') ax.plot(tsim,chiLower*np.ones(nSteps),'r--') ax.plot(tsim,nees_mean,'b-') ax.grid() fig2.show() # save the figure fig2.savefig('nees_enkf2_' + str(Ns) + '_' + nameNow + '.png') # find fraction of inliers l1 = (nees_mean < chiUpper).nonzero()[0] l2 = (nees_mean > chiLower).nonzero()[0] # get number of inliers len_in = len(set(l1).intersection(l2)) # get number of super (above) liers (sic) len_super = len((nees_mean > chiUpper).nonzero()[0]) # get number of sub-liers (below) len_sub = len((nees_mean < chiLower).nonzero()[0]) print("Conservative (below 95%% bounds): %f" % (float(len_sub)/float(nSteps))) print("Optimistic (above 95%% bounds): %f" % (float(len_super)/float(nSteps))) # save metrics FID = open('metrics_enkf2_' + str(Ns) + '_' + nameNow + '.txt','w') FID.write("mse1,mse2,nees_below95,nees_above95\n") FID.write("%f,%f,%f,%f\n" % (mse_tot[0],mse_tot[1],float(len_sub)/float(nSteps),float(len_super)/float(nSteps))) FID.close() raw_input("Return to exit") ''' return if __name__ == "__main__": main()
fatadama/estimation
challenge_problem/trials_clustering/gmm_clustering.py
Python
gpl-2.0
8,690
[ "Gaussian" ]
55f6b136a541d48f79e87f0ce71e31679aa5d8977fec2b5e6db87b7b9a5abb10
#objects.py
from buckPasser.sqlTable import SQLTable, StagedSqlTable
from buckPasser.inventory import PassiveInventory
from . import userInput
import os
from buckPasser.menus import ObjectMenu


def objectFactory(db, code, stage):
    obj = Objects(db)
    obj.setCode(code)
    obj.readFromDB(stage)
    obj.menu.title = obj.objName.value.title()
    obj.menu.description = obj.shortDescrip.value
    obj.menu.longDescrip = obj.descrip.value
    obj.menu.cursor = "{} > ".format(obj.objName.value)
    obj.inventory = PassiveInventory(db)
    obj.inventory.setCode(obj.inventoryCode.value)
    obj.inventory.menu.title = obj.objName.value.title()
    obj.inventory.readFromDB()
    obj.menu.commands.update({obj.useAlias.value.lower(): userInput.Command(func=obj.use, descrip = obj.useDescrip.value, takesArgs=False)})
    return obj


class Objects(StagedSqlTable):
    '''
    Objects is the base class for all interactable objects in the game.
    Each object should have its own commands so you can say flush toilet or
    logon for the computer, read for the magazine etc.
    You enter into an object list menu by typing inspect.
    '''
    def __init__(self, db, title = "Object"):
        StagedSqlTable.__init__(self, db)
        self.code = None
        self.stage = self.elementTable.addElement(title = 'Game Stage', name = 'stage', value = None, elementType = 'INT')
        self.objName = self.elementTable.addElement(title = 'Objects Name', name = 'objName', value = None, elementType = 'STRING', updatable = False)
        self.descrip = self.elementTable.addElement(title = 'Object Description', name = 'descrip', value = None, elementType = 'STRING', updatable = False)
        self.shortDescrip = self.elementTable.addElement(title = 'Short Description', name = 'shortDescrip', value = None, elementType = 'STRING', updatable = False)
        self.useAlias = self.elementTable.addElement(title = 'Object alias for the use method', name = 'useAlias', value = None, elementType = 'STRING')
        self.useDescrip = self.elementTable.addElement(title = 'Object use method description', name = 'useDescrip', value = None, elementType = 'STRING')
        self.usePrint = self.elementTable.addElement(title = 'What to print on use', name = 'usePrint', value = None, elementType = 'STRING')
        self.inventoryCode = self.elementTable.addElement(title = 'Items in Object', name = 'inventoryCode', value = None, elementType = 'INT')
        self.interactedFlag = self.elementTable.addElement(title = 'Object interacted with this stage', name = 'interactedFlag', value = None, elementType = 'INT')
        self.inventory = None
        self.table = 'objects'
        self.codeName = 'objCode'
        self.menu = ObjectMenu(db = db)
        self.commands = {
            'search': userInput.Command(func=self.search, descrip = "Search for items", takesArgs=False, hide = False),
            'inspect': userInput.Command(func=self.inspect, takesArgs=False, hide = False),
            'describe': userInput.Command(func=self.describe, takesArgs=False, hide = True)
        }
        self.menuCommands = {
            'search': userInput.Command(func=self.search, descrip = "Search for items", takesArgs=False, hide = False),
            'describe': userInput.Command(func=self.describe, takesArgs=False, hide = True)
            #'use': userInput.Command(func=self.use, takesArgs=False, hide = True)
        }
        self.menu.commands.update(self.menuCommands)

    def inspect(self):
        self.menu.runMenu()

    def describe(self):
        userInput.printToScreen("\n{0.objName.value}\n-------------------\n{0.descrip.value}".format(self))

    def search(self):
        self.inventory.runMenu()

    def use(self):
        if self.usePrint.value in ['', 'NULL', 'None', None]:
            userInput.printToScreen('That doesn\'t serve a purpose, just like your sorry ass.')
        else:
            userInput.printToScreen(self.usePrint.value)
        self.interactedFlag.value = True
        #userInput.printToScreen('Who would visit this website? Why does this dirt bag have it set as his home screen? Some questions are not meant to be answered.')
snhobbs/DetectiveBuckPasser
buckPasser/objects.py
Python
unlicense
3,863
[ "VisIt" ]
64ad3ad0cec7d556a85c2eae51189a8f3eed80dbc9a5a25f12491ab4590666be
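The Objects class in the row above wires user-facing verbs to methods through a dictionary that maps an alias string to a Command object, which a menu loop then dispatches. Below is a minimal, self-contained sketch of that dispatch pattern; the Command class, the Toilet object, and the dispatch() loop are simplified stand-ins invented for illustration, not the actual buckPasser.userInput API.

# Minimal sketch of the alias -> Command dispatch used by Objects above.
# Command, Toilet and dispatch() are simplified stand-ins, not the real buckPasser API.
class Command:
    def __init__(self, func, descrip="", takesArgs=False, hide=False):
        self.func = func
        self.descrip = descrip
        self.takesArgs = takesArgs
        self.hide = hide


class Toilet:
    def __init__(self):
        # 'flush' plays the same role as useAlias does for Objects.use()
        self.commands = {
            "flush": Command(func=self.flush, descrip="Flush the toilet"),
            "describe": Command(func=self.describe, hide=True),
        }

    def flush(self):
        print("The toilet gurgles unconvincingly.")

    def describe(self):
        print("A toilet. It has seen better days.")


def dispatch(obj, user_input):
    # Look the typed verb up in the object's command table and run it.
    cmd = obj.commands.get(user_input.strip().lower())
    if cmd is None:
        print("You can't do that here.")
    else:
        cmd.func()


if __name__ == "__main__":
    dispatch(Toilet(), "flush")   # -> The toilet gurgles unconvincingly.
    dispatch(Toilet(), "logon")   # -> You can't do that here.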
import re

from Bio import Entrez


def blast_config():
    access_regex = re.compile("[A-Z]{2}_*[0-9]+[.][0-9]")
    blast_db = "refseq_rna"
    blast_type = "blastn"
    e_value = 1
    hitlist_size = 10
    species = "Mus musculus"
    species_regex = re.compile("Mus.+")
    #align_min = 0    Setup a min and max align
    #align_max = 0    size for BLAST hits.
    return (access_regex, species_regex, species, blast_type, blast_db, hitlist_size, e_value)


def retrieve_config():
    email = "acc@server.com"
    database = "nuccore"
    rtype = "gb"
    rmode = "text"
    # Register the contact address with Entrez before returning it;
    # otherwise Entrez.email is still unset (None) when the tuple is built.
    Entrez.email = email
    return (Entrez.email, database, rtype, rmode)


if __name__ == "__main__":
    print("Configuration script for pyblast.py.")
julioscalves/pyblast
pyblast_config.py
Python
mit
691
[ "BLAST" ]
0b3bfc19c9fed3650568427b82a270adb874b7abcc7b4c4014b27d6809884437
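The tuples returned by blast_config() and retrieve_config() are meant to be unpacked by a driver script. The sketch below shows how the compiled accession and species regexes could filter BLAST hit descriptions; the two hit strings and the filtering loop are invented for illustration, and the import assumes the file above is on the path as pyblast_config.

from pyblast_config import blast_config

# Unpack the configuration exactly as blast_config() returns it.
(access_regex, species_regex, species,
 blast_type, blast_db, hitlist_size, e_value) = blast_config()

# Hypothetical hit descriptions, stand-ins for what a BLAST result parser would yield.
hits = [
    "NM_001301717.2 Mus musculus actin beta (Actb), mRNA",
    "XM_011543350.1 Homo sapiens tubulin alpha 1b (TUBA1B), mRNA",
]

for description in hits:
    accession = access_regex.search(description)
    # Keep only hits whose description matches the configured species pattern.
    if accession and species_regex.search(description):
        print("keep:", accession.group(0))
    else:
        print("skip:", description.split()[0])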
""" Test the coordinates class that represents the plane of orbit of the Sgr dwarf galaxy. """ # Third-party import astropy.coordinates as coord import astropy.units as u from astropy.utils.data import get_pkg_data_filename import numpy as np # This package from ..sgr import SagittariusLaw10, Sagittarius def test_simple(): c = coord.ICRS(coord.Angle(217.2141, u.degree), coord.Angle(-11.4351, u.degree)) c.transform_to(SagittariusLaw10) c = coord.Galactic(coord.Angle(217.2141, u.degree), coord.Angle(-11.4351, u.degree)) c.transform_to(SagittariusLaw10) c = SagittariusLaw10(coord.Angle(217.2141, u.degree), coord.Angle(-11.4351, u.degree)) c.transform_to(coord.ICRS) c.transform_to(coord.Galactic) c = coord.Galactic(coord.Angle(217.2141, u.degree), coord.Angle(-11.4351, u.degree)) c.transform_to(SagittariusLaw10) # with distance c = SagittariusLaw10(coord.Angle(217.2141, u.degree), coord.Angle(-11.4351, u.degree), distance=15*u.kpc) c.transform_to(coord.ICRS) c2 = c.transform_to(coord.Galactic) assert np.allclose(c2.distance.value, c.distance.value) # TODO: remove this in next version # For now: make sure old class still works from astropy.tests.helper import catch_warnings with catch_warnings(DeprecationWarning) as w: c = Sagittarius(217.2141*u.degree, -11.4351*u.degree) assert len(w) > 0 c2 = c.transform_to(coord.Galactic) c3 = c2.transform_to(Sagittarius) assert np.allclose(c3.Lambda.degree, c.Lambda.degree) assert np.allclose(c3.Beta.degree, c.Beta.degree) def test_against_David_Law(): """ Test my code against an output file from using David Law's cpp code. Do: g++ SgrCoord.cpp; ./a.out to generate the data file, SgrCoord_data. """ filename = get_pkg_data_filename('SgrCoord_data') law_data = np.genfromtxt(filename, names=True, delimiter=',') c = coord.Galactic(law_data["l"]*u.deg, law_data["b"]*u.deg) sgr_coords = c.transform_to(SagittariusLaw10) law_sgr_coords = SagittariusLaw10(Lambda=law_data["lambda"]*u.deg, Beta=law_data["beta"]*u.deg) sep = sgr_coords.separation(law_sgr_coords).arcsec*u.arcsec assert np.all(sep < 1.*u.arcsec)
adrn/gary
gala/coordinates/tests/test_sgr.py
Python
mit
2,422
[ "Galaxy" ]
a500ae8d8f09fa74ab442f271dc005f90282566ba203c7231017545ff5249634
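The tests above rely on a round-trip property: transforming a coordinate into the Sagittarius frame and back should recover the original position. A generic version of that check is sketched below using only astropy's built-in frames so it runs without gala; FK5 is a stand-in for the Sagittarius frame, and where gala is installed its SagittariusLaw10 frame could presumably be dropped in instead.

import astropy.coordinates as coord
import astropy.units as u
import numpy as np

# Round-trip consistency check of the kind used in test_simple() above.
# FK5 stands in for SagittariusLaw10 so the sketch runs with astropy alone.
original = coord.Galactic(l=217.2141 * u.degree, b=-11.4351 * u.degree)

intermediate = original.transform_to(coord.FK5())
round_trip = intermediate.transform_to(coord.Galactic())

assert np.allclose(round_trip.l.degree, original.l.degree)
assert np.allclose(round_trip.b.degree, original.b.degree)
print("round trip OK:", round_trip.l.degree, round_trip.b.degree)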
# ---------------------------------------------------------------------------- # Copyright (c) 2016-2021, QIIME 2 development team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file LICENSE, distributed with this software. # ---------------------------------------------------------------------------- import os.path import collections import urllib.parse import pkg_resources import itertools import tempfile import subprocess import skbio import skbio.diversity import pandas as pd import seaborn as sns import matplotlib.pyplot as plt from statsmodels.sandbox.stats.multicomp import multipletests import qiime2 import q2templates from natsort import natsorted from patsy import ModelDesc TEMPLATES = pkg_resources.resource_filename('q2_diversity', '_beta') def bioenv(output_dir: str, distance_matrix: skbio.DistanceMatrix, metadata: qiime2.Metadata) -> None: # Filter metadata to only include IDs present in the distance matrix. # Also ensures every distance matrix ID is present in the metadata. metadata = metadata.filter_ids(distance_matrix.ids) # drop non-numeric columns and empty columns pre_filtered_cols = set(metadata.columns) metadata = metadata.filter_columns(column_type='numeric') non_numeric_cols = pre_filtered_cols - set(metadata.columns) # Drop samples that have any missing values. # TODO use Metadata API if more filtering is supported in the future. df = metadata.to_dataframe() df = df.dropna() metadata = qiime2.Metadata(df) # filter 0 variance numerical columns and empty columns pre_filtered_cols = set(metadata.columns) metadata = metadata.filter_columns(drop_zero_variance=True, drop_all_missing=True) zero_variance_cols = pre_filtered_cols - set(metadata.columns) df = metadata.to_dataframe() # filter the distance matrix to exclude samples that were dropped from # the metadata, and keep track of how many samples survived the filtering # so that information can be presented to the user. 
initial_dm_length = distance_matrix.shape[0] distance_matrix = distance_matrix.filter(df.index) filtered_dm_length = distance_matrix.shape[0] result = skbio.stats.distance.bioenv(distance_matrix, df) result = q2templates.df_to_html(result) index = os.path.join(TEMPLATES, 'bioenv_assets', 'index.html') q2templates.render(index, output_dir, context={ 'initial_dm_length': initial_dm_length, 'filtered_dm_length': filtered_dm_length, 'non_numeric_cols': ', '.join(sorted(non_numeric_cols)), 'zero_variance_cols': ', '.join(sorted(zero_variance_cols)), 'result': result}) _beta_group_significance_fns = {'permanova': skbio.stats.distance.permanova, 'anosim': skbio.stats.distance.anosim, 'permdisp': skbio.stats.distance.permdisp} def _get_distance_boxplot_data(distance_matrix, group_id, groupings): x_ticklabels = [] all_group_distances = [] # extract the within group distances within_group_distances = [] pairs_summary = [] group = groupings[group_id] for i, sid1 in enumerate(group): for sid2 in group[:i]: dist = distance_matrix[sid1, sid2] within_group_distances.append(dist) pairs_summary.append((sid1, sid2, group_id, group_id, dist)) x_ticklabels.append('%s (n=%d)' % (group_id, len(within_group_distances))) all_group_distances.append(within_group_distances) # extract between group distances for group to each other group for other_group_id, other_group in groupings.items(): between_group_distances = [] if group_id == other_group_id: continue for sid1 in group: for sid2 in other_group: dist = distance_matrix[sid1, sid2] between_group_distances.append(dist) pairs_summary.append( (sid1, sid2, group_id, other_group_id, dist)) x_ticklabels.append('%s (n=%d)' % (other_group_id, len(between_group_distances))) all_group_distances.append(between_group_distances) return all_group_distances, x_ticklabels, pairs_summary def _get_pairwise_group_significance_stats( distance_matrix, group1_id, group2_id, groupings, metadata, beta_group_significance_fn, permutations): group1_group2_samples = groupings[group1_id] + groupings[group2_id] metadata = metadata[group1_group2_samples] distance_matrix = distance_matrix.filter(group1_group2_samples) return beta_group_significance_fn(distance_matrix, metadata, permutations=permutations) def beta_group_significance(output_dir: str, distance_matrix: skbio.DistanceMatrix, metadata: qiime2.CategoricalMetadataColumn, method: str = 'permanova', pairwise: bool = False, permutations: int = 999) -> None: try: beta_group_significance_fn = _beta_group_significance_fns[method] except KeyError: raise ValueError('Unknown group significance method %s. The available ' 'options are %s.' % (method, ', '.join(_beta_group_significance_fns))) # Filter metadata to only include IDs present in the distance matrix. # Also ensures every distance matrix ID is present in the metadata. metadata = metadata.filter_ids(distance_matrix.ids) metadata = metadata.drop_missing_values() # filter the distance matrix to exclude samples that were dropped from # the metadata due to missing values, and keep track of how many samples # survived the filtering so that information can be presented to the user. 
initial_dm_length = distance_matrix.shape[0] distance_matrix = distance_matrix.filter(metadata.ids) filtered_dm_length = distance_matrix.shape[0] metadata = metadata.to_series() # Run the significance test result = beta_group_significance_fn(distance_matrix, metadata, permutations=permutations) # Generate distance boxplots sns.set_style('white') # Identify the groups, then compute the within group distances and the # between group distances, and generate one boxplot per group. # groups will be an OrderedDict mapping group id to the sample ids in that # group. The order is used both on the x-axis, and in the layout of the # boxplots in the visualization. # TODO: update to use a grouping API and natsort API on # CategoricalMetadataColumn, if those become available. groupings = collections.OrderedDict( [(id, list(series.index)) for id, series in natsorted(metadata.groupby(metadata))]) pairs_summary = pd.DataFrame(columns=['SubjectID1', 'SubjectID2', 'Group1', 'Group2', 'Distance']) for group_id in groupings: group_distances, x_ticklabels, group_pairs_summary = \ _get_distance_boxplot_data(distance_matrix, group_id, groupings) group_pairs_summary = pd.DataFrame( group_pairs_summary, columns=['SubjectID1', 'SubjectID2', 'Group1', 'Group2', 'Distance']) pairs_summary = pd.concat([pairs_summary, group_pairs_summary]) ax = sns.boxplot(data=group_distances, flierprops={ 'marker': 'o', 'markeredgecolor': 'black', 'markeredgewidth': 0.5, 'alpha': 0.5}) ax.set_xticklabels(x_ticklabels, rotation=90) ax.set_xlabel('Group') ax.set_ylabel('Distance') ax.set_title('Distances to %s' % group_id) # change the color of the boxes to white for box in ax.artists: box.set_facecolor('white') sns.despine() plt.tight_layout() fig = ax.get_figure() fig.savefig(os.path.join(output_dir, '%s-boxplots.png' % urllib.parse.quote(str(group_id)))) fig.savefig(os.path.join(output_dir, '%s-boxplots.pdf' % urllib.parse.quote(str(group_id)))) fig.clear() pairs_summary.to_csv(os.path.join(output_dir, 'raw_data.tsv'), sep='\t') result_html = q2templates.df_to_html(result.to_frame()) if pairwise: pairwise_results = [] for group1_id, group2_id in itertools.combinations(groupings, 2): pairwise_result = \ _get_pairwise_group_significance_stats( distance_matrix=distance_matrix, group1_id=group1_id, group2_id=group2_id, groupings=groupings, metadata=metadata, beta_group_significance_fn=beta_group_significance_fn, permutations=permutations) pairwise_results.append([group1_id, group2_id, pairwise_result['sample size'], permutations, pairwise_result['test statistic'], pairwise_result['p-value']]) columns = ['Group 1', 'Group 2', 'Sample size', 'Permutations', result['test statistic name'], 'p-value'] pairwise_results = pd.DataFrame(pairwise_results, columns=columns) pairwise_results.set_index(['Group 1', 'Group 2'], inplace=True) pairwise_results['q-value'] = multipletests( pairwise_results['p-value'], method='fdr_bh')[1] pairwise_results.sort_index(inplace=True) pairwise_path = os.path.join( output_dir, '%s-pairwise.csv' % method) pairwise_results.to_csv(pairwise_path) pairwise_results_html = q2templates.df_to_html(pairwise_results) else: pairwise_results_html = None # repartition groupings for rendering group_ids = [ # We have to DOUBLE encode this, as the file/resource name is a literal # URI-encoded string, we do this to prevent issues with the filesystem # however, as a result, our links need to escape % so that the browser # asks for the right escaped name (instead of the original name, which # doesn't exist inside the visualization). 
urllib.parse.quote(urllib.parse.quote(k)) for k in groupings.keys() ] row_count, group_count = 3, len(group_ids) # Start at three plots per row while group_count % row_count != 0: row_count = row_count - 1 group_rows = [group_ids[g:g+row_count] for g in range(0, group_count, row_count)] index = os.path.join( TEMPLATES, 'beta_group_significance_assets', 'index.html') q2templates.render(index, output_dir, context={ 'initial_dm_length': initial_dm_length, 'filtered_dm_length': filtered_dm_length, 'method': method, 'group_rows': group_rows, 'bootstrap_group_col_size': int(12 / row_count), 'result': result_html, 'pairwise_results': pairwise_results_html }) def mantel(output_dir: str, dm1: skbio.DistanceMatrix, dm2: skbio.DistanceMatrix, method: str = 'spearman', permutations: int = 999, intersect_ids: bool = False, label1: str = 'Distance Matrix 1', label2: str = 'Distance Matrix 2') -> None: test_statistics = {'spearman': 'rho', 'pearson': 'r'} alt_hypothesis = 'two-sided' # The following code to handle mismatched IDs, and subsequently filter the # distance matrices, is not technically necessary because skbio's mantel # function will raise an error on mismatches with `strict=True`, and will # handle intersection if `strict=False`. However, we need to handle the ID # matching explicitly to find *which* IDs are mismatched -- the error # message coming from scikit-bio doesn't describe those. We also need to # have the mismatched IDs to display as a warning in the viz if # `intersect_ids=True`. Finally, the distance matrices are explicitly # filtered to matching IDs only because their data are used elsewhere in # this function (e.g. extracting scatter plot data). # Find the symmetric difference between ID sets. ids1 = set(dm1.ids) ids2 = set(dm2.ids) mismatched_ids = ids1 ^ ids2 if not intersect_ids and mismatched_ids: raise ValueError( 'The following ID(s) are not contained in both distance matrices. ' 'This sometimes occurs when mismatched files are passed. If this ' 'is not the case, you can use `intersect_ids` to discard these ' 'mismatches and apply the Mantel test to only those IDs that are ' 'found in both distance matrices.\n\n%s' % ', '.join(sorted(mismatched_ids))) if mismatched_ids: matched_ids = ids1 & ids2 # Run in `strict` mode because the matches should all be found in both # matrices. dm1 = dm1.filter(matched_ids, strict=True) dm2 = dm2.filter(matched_ids, strict=True) # Run in `strict` mode because all IDs should be matched at this point. r, p, sample_size = skbio.stats.distance.mantel( dm1, dm2, method=method, permutations=permutations, alternative=alt_hypothesis, strict=True) result = pd.Series([method.title(), sample_size, permutations, alt_hypothesis, r, p], index=['Method', 'Sample size', 'Permutations', 'Alternative hypothesis', '%s %s' % (method.title(), test_statistics[method]), 'p-value'], name='Mantel test results') table_html = q2templates.df_to_html(result.to_frame()) # We know the distance matrices have matching ID sets at this point, so we # can safely generate all pairs of IDs using one of the matrices' ID sets # (it doesn't matter which one). 
scatter_data = [] for id1, id2 in itertools.combinations(dm1.ids, 2): scatter_data.append((dm1[id1, id2], dm2[id1, id2])) plt.figure() x = 'Pairwise Distance (%s)' % label1 y = 'Pairwise Distance (%s)' % label2 scatter_data = pd.DataFrame(scatter_data, columns=[x, y]) sns.regplot(x=x, y=y, data=scatter_data, fit_reg=False) plt.savefig(os.path.join(output_dir, 'mantel-scatter.svg')) context = { 'table': table_html, 'sample_size': sample_size, 'mismatched_ids': mismatched_ids } index = os.path.join( TEMPLATES, 'mantel_assets', 'index.html') q2templates.render(index, output_dir, context=context) def adonis(output_dir: str, distance_matrix: skbio.DistanceMatrix, metadata: qiime2.Metadata, formula: str, permutations: int = 999, n_jobs: int = 1) -> None: # Validate sample metadata is superset et cetera metadata_ids = set(metadata.ids) dm_ids = distance_matrix.ids _validate_metadata_is_superset(metadata_ids, set(dm_ids)) # filter ids. ids must be in same order as dm filtered_md = metadata.to_dataframe().reindex(dm_ids) filtered_md.index.name = 'sample-id' metadata = qiime2.Metadata(filtered_md) # Validate formula terms = ModelDesc.from_formula(formula) for t in terms.rhs_termlist: for i in t.factors: column = metadata.get_column(i.name()) if column.has_missing_values(): raise ValueError('adonis requires metadata columns with no ' 'NaN values (missing values in column `%s`.)' % (column.name, )) # Run adonis results_fp = os.path.join(output_dir, 'adonis.tsv') with tempfile.TemporaryDirectory() as temp_dir_name: dm_fp = os.path.join(temp_dir_name, 'dm.tsv') distance_matrix.write(dm_fp) md_fp = os.path.join(temp_dir_name, 'md.tsv') metadata.save(md_fp) cmd = ['run_adonis.R', dm_fp, md_fp, formula, str(permutations), str(n_jobs), results_fp] _run_command(cmd) # Visualize results results = pd.read_csv(results_fp, sep='\t') results = q2templates.df_to_html(results) index = os.path.join(TEMPLATES, 'adonis_assets', 'index.html') q2templates.render(index, output_dir, context={'results': results}) def _validate_metadata_is_superset(metadata_ids, other_ids): missing_ids = other_ids.difference(metadata_ids) if len(missing_ids) > 0: raise ValueError('Missing samples in metadata: %r' % missing_ids) # Replace this function with QIIME2 API for wrapping commands/binaries, # pending https://github.com/qiime2/qiime2/issues/224 def _run_command(cmd, verbose=True): if verbose: print("Running external command line application. This may print " "messages to stdout and/or stderr.") print("The command being run is below. This command cannot " "be manually re-run as it will depend on temporary files that " "no longer exist.") print("\nCommand:", end=' ') print(" ".join(cmd), end='\n\n') subprocess.run(cmd, check=True)
jakereps/q2-diversity
q2_diversity/_beta/_visualizer.py
Python
bsd-3-clause
17,634
[ "scikit-bio" ]
ad7c772beffede478f56a546a0d2a602d428e182c4ae865d423cea2a17afdfb1
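The beta_group_significance code above splits a distance matrix into within-group and between-group distances before plotting, and hands the same matrix and grouping to scikit-bio's PERMANOVA. The toy example below reproduces that split on a made-up four-sample matrix; the sample IDs, distance values, and group labels are invented for illustration.

import numpy as np
import pandas as pd
from skbio import DistanceMatrix
from skbio.stats.distance import permanova

# Toy distance matrix and grouping, mirroring what _get_distance_boxplot_data() consumes.
ids = ['s1', 's2', 's3', 's4']
data = np.array([[0.0, 0.2, 0.7, 0.8],
                 [0.2, 0.0, 0.6, 0.9],
                 [0.7, 0.6, 0.0, 0.3],
                 [0.8, 0.9, 0.3, 0.0]])
dm = DistanceMatrix(data, ids)

grouping = pd.Series(['control', 'control', 'treated', 'treated'], index=ids)
groups = {g: list(s.index) for g, s in grouping.groupby(grouping)}

# Within-group distances: unique pairs inside each group.
within = [dm[a, b] for g in groups.values() for i, a in enumerate(g) for b in g[:i]]
# Between-group distances: every control sample against every treated sample.
between = [dm[a, b] for a in groups['control'] for b in groups['treated']]
print("within-group distances:", within)     # [0.2, 0.3]
print("between-group distances:", between)   # [0.7, 0.8, 0.6, 0.9]

# The same matrix and grouping feed the significance test; permutations kept tiny here.
result = permanova(dm, grouping, permutations=99)
print(result['test statistic'], result['p-value'])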
from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
import argparse
from AdaptivePELE.tests import testSpawning as tSpawning
from AdaptivePELE.tests import testAtomset as tAtomset
from AdaptivePELE.tests import testClustering as tClustering
from AdaptivePELE.tests import testAdaptiveSampling as tAdaptive
from AdaptivePELE.tests import testThresholdcalculator as tThreshold
from AdaptivePELE.tests import testDensityCalculator as tDensity
from AdaptivePELE.tests import testMD as tMD
from AdaptivePELE.tests import testMD_CUDA as tMD_CUDA
try:
    from AdaptivePELE.tests import testReporter as tR
except ImportError:
    pass


def parse_args():
    desc = ("Run testing suite. Possible options are:\na -- Run all tests\n"
            "at -- Run atomset tests\ns -- Run spawning tests\nth -- Run threshold "
            "calculator tests\nd -- Run density tests\nc -- Run clustering tests\n"
            "Ad -- Run adaptive integration tests\nMD -- Run adaptive MD tests\nMD_CUDA"
            " -- Run adaptive MD tests with CUDA\nR -- Run reporter tests\n")
    parser = argparse.ArgumentParser(description=desc, formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument("--run", default=None, nargs="*", help="Tests to run")
    parser.add_argument("--exclude", default=[], nargs="*", help="Tests to exclude")
    args = parser.parse_args()
    return args.run, args.exclude


def main(run, exclude):
    testSuite = unittest.TestSuite()
    if run is None:
        run = ["at", "s", "th", "d", "c", "Ad", "MD", "MD_CUDA", "R"]
    to_run = set(run)-set(exclude)
    if "at" in to_run or "a" in to_run:
        print("Will run atomset tests")
        testSuite.addTest(unittest.makeSuite(tAtomset.atomsetTest))
    if "s" in to_run or "a" in to_run:
        print("Will run spawning tests")
        testSuite.addTest(unittest.makeSuite(tSpawning.TestSpawningCalculator))
    if "th" in to_run or "a" in to_run:
        print("Will run threshold tests")
        testSuite.addTest(unittest.makeSuite(tThreshold.thresholdCalculatorTest))
    if "d" in to_run or "a" in to_run:
        print("Will run density tests")
        testSuite.addTest(unittest.makeSuite(tDensity.densityCalculatorTest))
    if "c" in to_run or "a" in to_run:
        print("Will run clustering tests")
        testSuite.addTest(unittest.makeSuite(tClustering.clusteringTest))
    if "Ad" in to_run or "a" in to_run:
        print("Will run integration tests")
        testSuite.addTest(unittest.makeSuite(tAdaptive.TestadaptiveSampling))
    if "MD" in to_run or "a" in to_run:
        print("Will run integration tests with md")
        testSuite.addTest(unittest.makeSuite(tMD.TestMD))
    if "MD_CUDA" in to_run or "a" in to_run:
        print("Will run integration tests with md in CUDA")
        testSuite.addTest(unittest.makeSuite(tMD_CUDA.TestMD_CUDA))
    if "R" in to_run or "a" in to_run:
        print("Will run reporter tests for OpenMM")
        testSuite.addTest(unittest.makeSuite(tR.TestReporter))

    runner = unittest.TextTestRunner()
    runner.run(testSuite)


if __name__ == "__main__":
    run_list, exclude_list = parse_args()
    main(run_list, exclude_list)
AdaptivePELE/AdaptivePELE
AdaptivePELE/runAllTests.py
Python
mit
3,226
[ "OpenMM" ]
8701b197defe5a3d5aa64fdd5e8f03f0f3cb83ad8ecd293ae2fc6691009fef29
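The script above composes a unittest.TestSuite from user-supplied labels, with an exclusion list subtracted before loading. The sketch below shows the same selection pattern in miniature; the two test cases and the REGISTRY mapping are stand-ins invented for illustration, and it uses TestLoader.loadTestsFromTestCase, the non-deprecated equivalent of unittest.makeSuite.

import unittest

# Stand-in test cases; the real AdaptivePELE test modules provide their own.
class FastChecks(unittest.TestCase):
    def test_addition(self):
        self.assertEqual(1 + 1, 2)

class SlowChecks(unittest.TestCase):
    def test_upper(self):
        self.assertEqual("md".upper(), "MD")

REGISTRY = {"fast": FastChecks, "slow": SlowChecks}

def build_suite(run=None, exclude=()):
    # Same selection logic as main() above: None means run everything,
    # and excluded labels are removed before the suite is assembled.
    if run is None:
        run = list(REGISTRY)
    loader = unittest.TestLoader()  # loadTestsFromTestCase replaces the deprecated makeSuite
    suite = unittest.TestSuite()
    for label in set(run) - set(exclude):
        suite.addTest(loader.loadTestsFromTestCase(REGISTRY[label]))
    return suite

if __name__ == "__main__":
    unittest.TextTestRunner().run(build_suite(exclude=["slow"]))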
# coding: utf-8 # Copyright (c) Pymatgen Development Team. # Distributed under the terms of the MIT License. """ Module for graph representations of crystals. """ import warnings import subprocess import numpy as np import os.path import copy from itertools import combinations from pymatgen.core import Structure, Lattice, PeriodicSite, Molecule from pymatgen.core.structure import FunctionalGroups from pymatgen.util.coord import lattice_points_in_supercell from pymatgen.vis.structure_vtk import EL_COLORS from monty.json import MSONable from monty.os.path import which from operator import itemgetter from collections import namedtuple, defaultdict from scipy.spatial import KDTree from scipy.stats import describe import networkx as nx import networkx.algorithms.isomorphism as iso from networkx.readwrite import json_graph from networkx.drawing.nx_agraph import write_dot try: import igraph IGRAPH_AVAILABLE = True except ImportError: IGRAPH_AVAILABLE = False import logging logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) __author__ = "Matthew Horton, Evan Spotte-Smith, Samuel Blau" __version__ = "0.1" __maintainer__ = "Matthew Horton" __email__ = "mkhorton@lbl.gov" __status__ = "Production" __date__ = "August 2017" ConnectedSite = namedtuple('ConnectedSite', 'site, jimage, index, weight, dist') def _compare(g1, g2, i1, i2): """ Helper function called by isomorphic to ensure comparison of node identities. """ return g1.vs[i1]['species'] == g2.vs[i2]['species'] def _igraph_from_nxgraph(graph): """ Helper function that converts a networkx graph object into an igraph graph object. """ nodes = graph.nodes(data=True) new_igraph = igraph.Graph() for node in nodes: new_igraph.add_vertex(name=str(node[0]), species=node[1]["specie"], coords=node[1]["coords"]) new_igraph.add_edges([(str(edge[0]), str(edge[1])) for edge in graph.edges()]) return new_igraph def _isomorphic(frag1, frag2): """ Internal function to check if two graph objects are isomorphic, using igraph if if is available and networkx if it is not. """ f1_nodes = frag1.nodes(data=True) f2_nodes = frag2.nodes(data=True) if len(f1_nodes) != len(f2_nodes): return False f2_edges = frag2.edges() if len(f2_edges) != len(f2_edges): return False f1_comp_dict = {} f2_comp_dict = {} for node in f1_nodes: if node[1]["specie"] not in f1_comp_dict: f1_comp_dict[node[1]["specie"]] = 1 else: f1_comp_dict[node[1]["specie"]] += 1 for node in f2_nodes: if node[1]["specie"] not in f2_comp_dict: f2_comp_dict[node[1]["specie"]] = 1 else: f2_comp_dict[node[1]["specie"]] += 1 if f1_comp_dict != f2_comp_dict: return False if IGRAPH_AVAILABLE: ifrag1 = _igraph_from_nxgraph(frag1) ifrag2 = _igraph_from_nxgraph(frag2) return ifrag1.isomorphic_vf2(ifrag2, node_compat_fn=_compare) else: nm = iso.categorical_node_match("specie", "ERROR") return nx.is_isomorphic(frag1.to_undirected(), frag2.to_undirected(), node_match=nm) class StructureGraph(MSONable): """ This is a class for annotating a Structure with bond information, stored in the form of a graph. A "bond" does not necessarily have to be a chemical bond, but can store any kind of information that connects two Sites. """ def __init__(self, structure, graph_data=None): """ If constructing this class manually, use the `with_empty_graph` method or `with_local_env_strategy` method (using an algorithm provided by the `local_env` module, such as O'Keeffe). This class that contains connection information: relationships between sites represented by a Graph structure, and an associated structure object. 
This class uses the NetworkX package to store and operate on the graph itself, but contains a lot of helper methods to make associating a graph with a given crystallographic structure easier. Use cases for this include storing bonding information, NMR J-couplings, Heisenberg exchange parameters, etc. For periodic graphs, class stores information on the graph edges of what lattice image the edge belongs to. :param structure: a Structure object :param graph_data: dict containing graph information in dict format (not intended to be constructed manually, see as_dict method for format) """ if isinstance(structure, StructureGraph): # just make a copy from input graph_data = structure.as_dict()['graphs'] self.structure = structure self.graph = nx.readwrite.json_graph.adjacency_graph(graph_data) # tidy up edge attr dicts, reading to/from json duplicates # information for u, v, k, d in self.graph.edges(keys=True, data=True): if 'id' in d: del d['id'] if 'key' in d: del d['key'] # ensure images are tuples (conversion to lists happens # when serializing back from json), it's important images # are hashable/immutable if 'to_jimage' in d: d['to_jimage'] = tuple(d['to_jimage']) if 'from_jimage' in d: d['from_jimage'] = tuple(d['from_jimage']) @classmethod def with_empty_graph(cls, structure, name="bonds", edge_weight_name=None, edge_weight_units=None): """ Constructor for StructureGraph, returns a StructureGraph object with an empty graph (no edges, only nodes defined that correspond to Sites in Structure). :param structure (Structure): :param name (str): name of graph, e.g. "bonds" :param edge_weight_name (str): name of edge weights, e.g. "bond_length" or "exchange_constant" :param edge_weight_units (str): name of edge weight units e.g. "Å" or "eV" :return (StructureGraph): """ if edge_weight_name and (edge_weight_units is None): raise ValueError("Please specify units associated " "with your edge weights. Can be " "empty string if arbitrary or " "dimensionless.") # construct graph with one node per site # graph attributes don't change behavior of graph, # they're just for book-keeping graph = nx.MultiDiGraph(edge_weight_name=edge_weight_name, edge_weight_units=edge_weight_units, name=name) graph.add_nodes_from(range(len(structure))) graph_data = json_graph.adjacency_data(graph) return cls(structure, graph_data=graph_data) @staticmethod def with_edges(structure, edges): """ Constructor for MoleculeGraph, using pre-existing or pre-defined edges with optional edge parameters. :param molecule: Molecule object :param edges: dict representing the bonds of the functional group (format: {(from_index, to_index, from_image, to_image): props}, where props is a dictionary of properties, including weight. Props should be None if no additional properties are to be specified. :return: sg, a StructureGraph """ sg = StructureGraph.with_empty_graph(structure, name="bonds", edge_weight_name="weight", edge_weight_units="") for edge, props in edges.items(): try: from_index = edge[0] to_index = edge[1] from_image = edge[2] to_image = edge[3] except TypeError: raise ValueError("Edges must be given as (from_index, to_index," " from_image, to_image) tuples") if props is not None: if "weight" in props.keys(): weight = props["weight"] del props["weight"] else: weight = None if len(props.items()) == 0: props = None else: weight = None nodes = sg.graph.nodes if not (from_index in nodes and to_index in nodes): raise ValueError("Edges cannot be added if nodes are not" " present in the graph. 
Please check your" " indices.") sg.add_edge(from_index, to_index, from_jimage=from_image, to_jimage=to_image, weight=weight, edge_properties=props) sg.set_node_attributes() return sg @staticmethod def with_local_env_strategy(structure, strategy, weights=False): """ Constructor for StructureGraph, using a strategy from :Class: `pymatgen.analysis.local_env`. :param structure: Structure object :param strategy: an instance of a :Class: `pymatgen.analysis.local_env.NearNeighbors` object :param weights: if True, use weights from local_env class (consult relevant class for their meaning) :return: """ sg = StructureGraph.with_empty_graph(structure, name="bonds") for n, neighbors in enumerate(strategy.get_all_nn_info(structure)): for neighbor in neighbors: # local_env will always try to add two edges # for any one bond, one from site u to site v # and another form site v to site u: this is # harmless, so warn_duplicates=False sg.add_edge(from_index=n, from_jimage=(0, 0, 0), to_index=neighbor['site_index'], to_jimage=neighbor['image'], weight=neighbor['weight'] if weights else None, warn_duplicates=False) return sg @property def name(self): """ :return: Name of graph """ return self.graph.graph['name'] @property def edge_weight_name(self): """ :return: Name of the edge weight property of graph """ return self.graph.graph['edge_weight_name'] @property def edge_weight_unit(self): """ :return: Units of the edge weight property of graph """ return self.graph.graph['edge_weight_units'] def add_edge(self, from_index, to_index, from_jimage=(0, 0, 0), to_jimage=None, weight=None, warn_duplicates=True, edge_properties=None): """ Add edge to graph. Since physically a 'bond' (or other connection between sites) doesn't have a direction, from_index, from_jimage can be swapped with to_index, to_jimage. However, images will always always be shifted so that from_index < to_index and from_jimage becomes (0, 0, 0). :param from_index: index of site connecting from :param to_index: index of site connecting to :param from_jimage (tuple of ints): lattice vector of periodic image, e.g. (1, 0, 0) for periodic image in +x direction :param to_jimage (tuple of ints): lattice vector of image :param weight (float): e.g. 
bond length :param warn_duplicates (bool): if True, will warn if trying to add duplicate edges (duplicate edges will not be added in either case) :param edge_properties (dict): any other information to store on graph edges, similar to Structure's site_properties :return: """ # this is not necessary for the class to work, but # just makes it neater if to_index < from_index: to_index, from_index = from_index, to_index to_jimage, from_jimage = from_jimage, to_jimage # constrain all from_jimages to be (0, 0, 0), # initial version of this class worked even if # from_jimage != (0, 0, 0), but making this # assumption simplifies logic later if not np.array_equal(from_jimage, (0, 0, 0)): shift = from_jimage from_jimage = np.subtract(from_jimage, shift) to_jimage = np.subtract(to_jimage, shift) # automatic detection of to_jimage if user doesn't specify # will try and detect all equivalent images and add multiple # edges if appropriate if to_jimage is None: # assume we want the closest site warnings.warn("Please specify to_jimage to be unambiguous, " "trying to automatically detect.") dist, to_jimage = self.structure[from_index] \ .distance_and_image(self.structure[to_index]) if dist == 0: # this will happen when from_index == to_index, # typically in primitive single-atom lattices images = [1, 0, 0], [0, 1, 0], [0, 0, 1] dists = [] for image in images: dists.append(self.structure[from_index] .distance_and_image(self.structure[from_index], jimage=image)[0]) dist = min(dists) equiv_sites = self.structure.get_neighbors_in_shell(self.structure[from_index].coords, dist, dist * 0.01, include_index=True) for site, dist, to_index in equiv_sites: to_jimage = np.subtract(site.frac_coords, self.structure[from_index].frac_coords) to_jimage = np.round(to_jimage).astype(int) self.add_edge(from_index=from_index, from_jimage=(0, 0, 0), to_jimage=to_jimage, to_index=to_index) return # sanitize types from_jimage, to_jimage = tuple(map(int, from_jimage)), tuple(map(int, to_jimage)) from_index, to_index = int(from_index), int(to_index) # check we're not trying to add a duplicate edge # there should only ever be at most one edge # between a given (site, jimage) pair and another # (site, jimage) pair existing_edge_data = self.graph.get_edge_data(from_index, to_index) if existing_edge_data: for key, d in existing_edge_data.items(): if d["to_jimage"] == to_jimage: if warn_duplicates: warnings.warn("Trying to add an edge that already exists from " "site {} to site {} in {}.".format(from_index, to_index, to_jimage)) return # generic container for additional edge properties, # similar to site properties edge_properties = edge_properties or {} if weight: self.graph.add_edge(from_index, to_index, to_jimage=to_jimage, weight=weight, **edge_properties) else: self.graph.add_edge(from_index, to_index, to_jimage=to_jimage, **edge_properties) def insert_node(self, i, species, coords, coords_are_cartesian=False, validate_proximity=False, site_properties=None, edges=None): """ A wrapper around Molecule.insert(), which also incorporates the new site into the MoleculeGraph. :param i: Index at which to insert the new site :param species: Species for the new site :param coords: 3x1 array representing coordinates of the new site :param coords_are_cartesian: Whether coordinates are cartesian. Defaults to False. :param validate_proximity: For Molecule.insert(); if True (default False), distance will be checked to ensure that site can be safely added. 
:param site_properties: Site properties for Molecule :param edges: List of dicts representing edges to be added to the MoleculeGraph. These edges must include the index of the new site i, and all indices used for these edges should reflect the MoleculeGraph AFTER the insertion, NOT before. Each dict should at least have a "to_index" and "from_index" key, and can also have a "weight" and a "properties" key. :return: """ self.structure.insert(i, species, coords, coords_are_cartesian=coords_are_cartesian, validate_proximity=validate_proximity, properties=site_properties) mapping = {} for j in range(len(self.structure) - 1): if j < i: mapping[j] = j else: mapping[j] = j + 1 nx.relabel_nodes(self.graph, mapping, copy=False) self.graph.add_node(i) self.set_node_attributes() if edges is not None: for edge in edges: try: self.add_edge(edge["from_index"], edge["to_index"], from_jimage=(0, 0, 0), to_jimage=edge["to_jimage"], weight=edge.get("weight", None), edge_properties=edge.get("properties", None)) except KeyError: raise RuntimeError("Some edges are invalid.") def set_node_attributes(self): """ Gives each node a "specie" and a "coords" attribute, updated with the current species and coordinates. :return: """ species = {} coords = {} properties = {} for node in self.graph.nodes(): species[node] = self.structure[node].specie.symbol coords[node] = self.structure[node].coords properties[node] = self.structure[node].properties nx.set_node_attributes(self.graph, species, "specie") nx.set_node_attributes(self.graph, coords, "coords") nx.set_node_attributes(self.graph, properties, "properties") def alter_edge(self, from_index, to_index, to_jimage=None, new_weight=None, new_edge_properties=None): """ Alters either the weight or the edge_properties of an edge in the StructureGraph. :param from_index: int :param to_index: int :param to_jimage: tuple :param new_weight: alter_edge does not require that weight be altered. As such, by default, this is None. If weight is to be changed, it should be a float. :param new_edge_properties: alter_edge does not require that edge_properties be altered. As such, by default, this is None. If any edge properties are to be changed, it should be a dictionary of edge properties to be changed. :return: """ existing_edges = self.graph.get_edge_data(from_index, to_index) # ensure that edge exists before attempting to change it if not existing_edges: raise ValueError("Edge between {} and {} cannot be altered;\ no edge exists between those sites.".format( from_index, to_index )) if to_jimage is None: edge_index = 0 else: for i, properties in existing_edges.items(): if properties["to_jimage"] == to_jimage: edge_index = i if new_weight is not None: self.graph[from_index][to_index][edge_index]['weight'] = new_weight if new_edge_properties is not None: for prop in list(new_edge_properties.keys()): self.graph[from_index][to_index][edge_index][prop] = new_edge_properties[prop] def break_edge(self, from_index, to_index, to_jimage=None, allow_reverse=False): """ Remove an edge from the StructureGraph. If no image is given, this method will fail. :param from_index: int :param to_index: int :param to_jimage: tuple :param allow_reverse: If allow_reverse is True, then break_edge will attempt to break both (from_index, to_index) and, failing that, will attempt to break (to_index, from_index). 
:return: """ # ensure that edge exists before attempting to remove it existing_edges = self.graph.get_edge_data(from_index, to_index) existing_reverse = None if to_jimage is None: raise ValueError("Image must be supplied, to avoid ambiguity.") if existing_edges: for i, properties in existing_edges.items(): if properties["to_jimage"] == to_jimage: edge_index = i self.graph.remove_edge(from_index, to_index, edge_index) else: if allow_reverse: existing_reverse = self.graph.get_edge_data(to_index, from_index) if existing_reverse: for i, properties in existing_reverse.items(): if properties["to_jimage"] == to_jimage: edge_index = i self.graph.remove_edge(to_index, from_index, edge_index) else: raise ValueError("Edge cannot be broken between {} and {};\ no edge exists between those sites.".format( from_index, to_index )) def remove_nodes(self, indices): """ A wrapper for Molecule.remove_sites(). :param indices: list of indices in the current Molecule (and graph) to be removed. :return: """ self.structure.remove_sites(indices) self.graph.remove_nodes_from(indices) mapping = {} for correct, current in enumerate(sorted(self.graph.nodes)): mapping[current] = correct nx.relabel_nodes(self.graph, mapping, copy=False) self.set_node_attributes() def substitute_group(self, index, func_grp, strategy, bond_order=1, graph_dict=None, strategy_params=None): """ Builds off of Structure.substitute to replace an atom in self.structure with a functional group. This method also amends self.graph to incorporate the new functional group. NOTE: Care must be taken to ensure that the functional group that is substituted will not place atoms to close to each other, or violate the dimensions of the Lattice. :param index: Index of atom to substitute. :param func_grp: Substituent molecule. There are two options: 1. Providing an actual Molecule as the input. The first atom must be a DummySpecie X, indicating the position of nearest neighbor. The second atom must be the next nearest atom. For example, for a methyl group substitution, func_grp should be X-CH3, where X is the first site and C is the second site. What the code will do is to remove the index site, and connect the nearest neighbor to the C atom in CH3. The X-C bond indicates the directionality to connect the atoms. 2. A string name. The molecule will be obtained from the relevant template in func_groups.json. :param strategy: Class from pymatgen.analysis.local_env. :param bond_order: A specified bond order to calculate the bond length between the attached functional group and the nearest neighbor site. Defaults to 1. :param graph_dict: Dictionary representing the bonds of the functional group (format: {(u, v): props}, where props is a dictionary of properties, including weight. If None, then the algorithm will attempt to automatically determine bonds using one of a list of strategies defined in pymatgen.analysis.local_env. :param strategy_params: dictionary of keyword arguments for strategy. If None, default parameters will be used. :return: """ def map_indices(grp): grp_map = {} # Get indices now occupied by functional group # Subtracting 1 because the dummy atom X should not count atoms = len(grp) - 1 offset = len(self.structure) - atoms for i in range(atoms): grp_map[i] = i + offset return grp_map if isinstance(func_grp, Molecule): func_grp = copy.deepcopy(func_grp) else: try: func_grp = copy.deepcopy(FunctionalGroups[func_grp]) except Exception: raise RuntimeError("Can't find functional group in list. 
" "Provide explicit coordinate instead") self.structure.substitute(index, func_grp, bond_order=bond_order) mapping = map_indices(func_grp) # Remove dummy atom "X" func_grp.remove_species("X") if graph_dict is not None: for (u, v) in graph_dict.keys(): edge_props = graph_dict[(u, v)] if "to_jimage" in edge_props.keys(): to_jimage = edge_props["to_jimage"] del edge_props["to_jimage"] else: # By default, assume that all edges should stay remain # inside the initial image to_jimage = (0, 0, 0) if "weight" in edge_props.keys(): weight = edge_props["weight"] del edge_props["weight"] self.add_edge(mapping[u], mapping[v], to_jimage=to_jimage, weight=weight, edge_properties=edge_props) else: if strategy_params is None: strategy_params = {} strat = strategy(**strategy_params) for site in mapping.values(): neighbors = strat.get_nn_info(self.structure, site) for neighbor in neighbors: self.add_edge(from_index=site, from_jimage=(0, 0, 0), to_index=neighbor['site_index'], to_jimage=neighbor['image'], weight=neighbor['weight'], warn_duplicates=False) def get_connected_sites(self, n, jimage=(0, 0, 0)): """ Returns a named tuple of neighbors of site n: periodic_site, jimage, index, weight. Index is the index of the corresponding site in the original structure, weight can be None if not defined. :param n: index of Site in Structure :param jimage: lattice vector of site :return: list of ConnectedSite tuples, sorted by closest first """ connected_sites = set() connected_site_images = set() out_edges = [(u, v, d, 'out') for u, v, d in self.graph.out_edges(n, data=True)] in_edges = [(u, v, d, 'in') for u, v, d in self.graph.in_edges(n, data=True)] for u, v, d, dir in out_edges + in_edges: to_jimage = d['to_jimage'] if dir == 'in': u, v = v, u to_jimage = np.multiply(-1, to_jimage) to_jimage = tuple(map(int, np.add(to_jimage, jimage))) site_d = self.structure[v].as_dict() site_d['abc'] = np.add(site_d['abc'], to_jimage).tolist() site = PeriodicSite.from_dict(site_d) # from_site if jimage arg != (0, 0, 0) relative_jimage = np.subtract(to_jimage, jimage) dist = self.structure[u].distance(self.structure[v], jimage=relative_jimage) weight = d.get('weight', None) if (v, to_jimage) not in connected_site_images: connected_site = ConnectedSite(site=site, jimage=to_jimage, index=v, weight=weight, dist=dist) connected_sites.add(connected_site) connected_site_images.add((v, to_jimage)) # return list sorted by closest sites first connected_sites = list(connected_sites) connected_sites.sort(key=lambda x: x.dist) return connected_sites def get_coordination_of_site(self, n): """ Returns the number of neighbors of site n. In graph terms, simply returns degree of node corresponding to site n. :param n: index of site :return (int): """ number_of_self_loops = sum([1 for n, v in self.graph.edges(n) if n == v]) return self.graph.degree(n) - number_of_self_loops def draw_graph_to_file(self, filename="graph", diff=None, hide_unconnected_nodes=False, hide_image_edges=True, edge_colors=False, node_labels=False, weight_labels=False, image_labels=False, color_scheme="VESTA", keep_dot=False, algo="fdp"): """ Draws graph using GraphViz. The networkx graph object itself can also be drawn with networkx's in-built graph drawing methods, but note that this might give misleading results for multigraphs (edges are super-imposed on each other). If visualization is difficult to interpret, `hide_image_edges` can help, especially in larger graphs. 
:param filename: filename to output, will detect filetype from extension (any graphviz filetype supported, such as pdf or png) :param diff (StructureGraph): an additional graph to compare with, will color edges red that do not exist in diff and edges green that are in diff graph but not in the reference graph :param hide_unconnected_nodes: if True, hide unconnected nodes :param hide_image_edges: if True, do not draw edges that go through periodic boundaries :param edge_colors (bool): if True, use node colors to color edges :param node_labels (bool): if True, label nodes with species and site index :param weight_labels (bool): if True, label edges with weights :param image_labels (bool): if True, label edges with their periodic images (usually only used for debugging, edges to periodic images always appear as dashed lines) :param color_scheme (str): "VESTA" or "JMOL" :param keep_dot (bool): keep GraphViz .dot file for later visualization :param algo: any graphviz algo, "neato" (for simple graphs) or "fdp" (for more crowded graphs) usually give good outputs :return: """ if not which(algo): raise RuntimeError("StructureGraph graph drawing requires " "GraphViz binaries to be in the path.") # Developer note: NetworkX also has methods for drawing # graphs using matplotlib, these also work here. However, # a dedicated tool like GraphViz allows for much easier # control over graph appearance and also correctly displays # mutli-graphs (matplotlib can superimpose multiple edges). g = self.graph.copy() g.graph = {'nodesep': 10.0, 'dpi': 300, 'overlap': "false"} # add display options for nodes for n in g.nodes(): # get label by species name label = "{}({})".format(str(self.structure[n].specie), n) if node_labels else "" # use standard color scheme for nodes c = EL_COLORS[color_scheme].get(str(self.structure[n].specie.symbol), [0, 0, 0]) # get contrasting font color # magic numbers account for perceived luminescence # https://stackoverflow.com/questions/1855884/determine-font-color-based-on-background-color fontcolor = '#000000' if 1 - (c[0] * 0.299 + c[1] * 0.587 + c[2] * 0.114) / 255 < 0.5 else '#ffffff' # convert color to hex string color = "#{:02x}{:02x}{:02x}".format(c[0], c[1], c[2]) g.add_node(n, fillcolor=color, fontcolor=fontcolor, label=label, fontname="Helvetica-bold", style="filled", shape="circle") edges_to_delete = [] # add display options for edges for u, v, k, d in g.edges(keys=True, data=True): # retrieve from/to images, set as origin if not defined to_image = d['to_jimage'] # set edge style d['style'] = "solid" if to_image != (0, 0, 0): d['style'] = "dashed" if hide_image_edges: edges_to_delete.append((u, v, k)) # don't show edge directions d['arrowhead'] = "none" # only add labels for images that are not the origin if image_labels: d['headlabel'] = "" if to_image == (0, 0, 0) else "to {}".format((to_image)) d['arrowhead'] = "normal" if d['headlabel'] else "none" # optionally color edges using node colors color_u = g.nodes[u]['fillcolor'] color_v = g.nodes[v]['fillcolor'] d['color_uv'] = "{};0.5:{};0.5".format(color_u, color_v) if edge_colors else "#000000" # optionally add weights to graph if weight_labels: units = g.graph.get('edge_weight_units', "") if d.get('weight'): d['label'] = "{:.2f} {}".format(d['weight'], units) # update edge with our new style attributes g.edges[u, v, k].update(d) # optionally remove periodic image edges, # these can be confusing due to periodic boundaries if hide_image_edges: for edge_to_delete in edges_to_delete: g.remove_edge(*edge_to_delete) # 
optionally hide unconnected nodes, # these can appear when removing periodic edges if hide_unconnected_nodes: g = g.subgraph([n for n in g.degree() if g.degree()[n] != 0]) # optionally highlight differences with another graph if diff: diff = self.diff(diff, strict=True) green_edges = [] red_edges = [] for u, v, k, d in g.edges(keys=True, data=True): if (u, v, d['to_jimage']) in diff['self']: # edge has been deleted red_edges.append((u, v, k)) elif (u, v, d['to_jimage']) in diff['other']: # edge has been added green_edges.append((u, v, k)) for u, v, k in green_edges: g.edges[u, v, k].update({'color_uv': '#00ff00'}) for u, v, k in red_edges: g.edges[u, v, k].update({'color_uv': '#ff0000'}) basename, extension = os.path.splitext(filename) extension = extension[1:] write_dot(g, basename + ".dot") with open(filename, "w") as f: args = [algo, "-T", extension, basename + ".dot"] rs = subprocess.Popen(args, stdout=f, stdin=subprocess.PIPE, close_fds=True) rs.communicate() if rs.returncode != 0: raise RuntimeError("{} exited with return code {}.".format(algo, rs.returncode)) if not keep_dot: os.remove(basename + ".dot") @property def types_and_weights_of_connections(self): """ Extract a dictionary summarizing the types and weights of edges in the graph. :return: A dictionary with keys specifying the species involved in a connection in alphabetical order (e.g. string 'Fe-O') and values which are a list of weights for those connections (e.g. bond lengths). """ def get_label(u, v): u_label = self.structure[u].species_string v_label = self.structure[v].species_string return "-".join(sorted((u_label, v_label))) types = defaultdict(list) for u, v, d in self.graph.edges(data=True): label = get_label(u, v) types[label].append(d['weight']) return dict(types) @property def weight_statistics(self): """ Extract a statistical summary of edge weights present in the graph. :return: A dict with an 'all_weights' list, 'minimum', 'maximum', 'median', 'mean', 'std_dev' """ all_weights = [d.get('weight', None) for u, v, d in self.graph.edges(data=True)] stats = describe(all_weights, nan_policy='omit') return { 'all_weights': all_weights, 'min': stats.minmax[0], 'max': stats.minmax[1], 'mean': stats.mean, 'variance': stats.variance } def types_of_coordination_environments(self, anonymous=False): """ Extract information on the different co-ordination environments present in the graph. :param anonymous: if anonymous, will replace specie names with A, B, C, etc. :return: a list of co-ordination environments, e.g. ['Mo-S(6)', 'S-Mo(3)'] """ motifs = set() for idx, site in enumerate(self.structure): centre_sp = site.species_string connected_sites = self.get_connected_sites(idx) connected_species = [connected_site.site.species_string for connected_site in connected_sites] labels = [] for sp in set(connected_species): count = connected_species.count(sp) labels.append((count, sp)) labels = sorted(labels, reverse=True) if anonymous: mapping = {centre_sp: 'A'} available_letters = [chr(66 + i) for i in range(25)] for label in labels: sp = label[1] if sp not in mapping: mapping[sp] = available_letters.pop(0) centre_sp = 'A' labels = [(label[0], mapping[label[1]]) for label in labels] labels = ["{}({})".format(label[1], label[0]) for label in labels] motif = '{}-{}'.format(centre_sp, ','.join(labels)) motifs.add(motif) return sorted(list(motifs)) def as_dict(self): """ As in :Class: `pymatgen.core.Structure` except with using `to_dict_of_dicts` from NetworkX to store graph information. 
""" d = {"@module": self.__class__.__module__, "@class": self.__class__.__name__, "structure": self.structure.as_dict(), "graphs": json_graph.adjacency_data(self.graph)} return d @classmethod def from_dict(cls, d): """ As in :Class: `pymatgen.core.Structure` except restoring graphs using `from_dict_of_dicts` from NetworkX to restore graph information. """ s = Structure.from_dict(d['structure']) return cls(s, d['graphs']) def __mul__(self, scaling_matrix): """ Replicates the graph, creating a supercell, intelligently joining together edges that lie on periodic boundaries. In principle, any operations on the expanded graph could also be done on the original graph, but a larger graph can be easier to visualize and reason about. :param scaling_matrix: same as Structure.__mul__ :return: """ # Developer note: a different approach was also trialed, using # a simple Graph (instead of MultiDiGraph), with node indices # representing both site index and periodic image. Here, the # number of nodes != number of sites in the Structure. This # approach has many benefits, but made it more difficult to # keep the graph in sync with its corresponding Structure. # Broadly, it would be easier to multiply the Structure # *before* generating the StructureGraph, but this isn't # possible when generating the graph using critic2 from # charge density. # Multiplication works by looking for the expected position # of an image node, and seeing if that node exists in the # supercell. If it does, the edge is updated. This is more # computationally expensive than just keeping track of the # which new lattice images present, but should hopefully be # easier to extend to a general 3x3 scaling matrix. # code adapted from Structure.__mul__ scale_matrix = np.array(scaling_matrix, np.int16) if scale_matrix.shape != (3, 3): scale_matrix = np.array(scale_matrix * np.eye(3), np.int16) else: # TODO: test __mul__ with full 3x3 scaling matrices raise NotImplementedError('Not tested with 3x3 scaling matrices yet.') new_lattice = Lattice(np.dot(scale_matrix, self.structure.lattice.matrix)) f_lat = lattice_points_in_supercell(scale_matrix) c_lat = new_lattice.get_cartesian_coords(f_lat) new_sites = [] new_graphs = [] for v in c_lat: # create a map of nodes from original graph to its image mapping = {n: n + len(new_sites) for n in range(len(self.structure))} for idx, site in enumerate(self.structure): s = PeriodicSite(site.species, site.coords + v, new_lattice, properties=site.properties, coords_are_cartesian=True, to_unit_cell=False) new_sites.append(s) new_graphs.append(nx.relabel_nodes(self.graph, mapping, copy=True)) new_structure = Structure.from_sites(new_sites) # merge all graphs into one big graph new_g = nx.MultiDiGraph() for new_graph in new_graphs: new_g = nx.union(new_g, new_graph) edges_to_remove = [] # tuple of (u, v, k) edges_to_add = [] # tuple of (u, v, attr_dict) # list of new edges inside supercell # for duplicate checking edges_inside_supercell = [{u, v} for u, v, d in new_g.edges(data=True) if d['to_jimage'] == (0, 0, 0)] new_periodic_images = [] orig_lattice = self.structure.lattice # use k-d tree to match given position to an # existing Site in Structure kd_tree = KDTree(new_structure.cart_coords) # tolerance in Å for sites to be considered equal # this could probably be a lot smaller tol = 0.05 for u, v, k, d in new_g.edges(keys=True, data=True): to_jimage = d['to_jimage'] # for node v # reduce unnecessary checking if to_jimage != (0, 0, 0): # get index in original site n_u = u % len(self.structure) n_v = v % 
len(self.structure) # get fractional co-ordinates of where atoms defined # by edge are expected to be, relative to original # lattice (keeping original lattice has # significant benefits) v_image_frac = np.add(self.structure[n_v].frac_coords, to_jimage) u_frac = self.structure[n_u].frac_coords # using the position of node u as a reference, # get relative Cartesian co-ordinates of where # atoms defined by edge are expected to be v_image_cart = orig_lattice.get_cartesian_coords(v_image_frac) u_cart = orig_lattice.get_cartesian_coords(u_frac) v_rel = np.subtract(v_image_cart, u_cart) # now retrieve position of node v in # new supercell, and get asgolute Cartesian # co-ordinates of where atoms defined by edge # are expected to be v_expec = new_structure[u].coords + v_rel # now search in new structure for these atoms # query returns (distance, index) v_present = kd_tree.query(v_expec) v_present = v_present[1] if v_present[0] <= tol else None # check if image sites now present in supercell # and if so, delete old edge that went through # periodic boundary if v_present is not None: new_u = u new_v = v_present new_d = d.copy() # node now inside supercell new_d['to_jimage'] = (0, 0, 0) edges_to_remove.append((u, v, k)) # make sure we don't try to add duplicate edges # will remove two edges for everyone one we add if {new_u, new_v} not in edges_inside_supercell: # normalize direction if new_v < new_u: new_u, new_v = new_v, new_u edges_inside_supercell.append({new_u, new_v}) edges_to_add.append((new_u, new_v, new_d)) else: # want to find new_v such that we have # full periodic boundary conditions # so that nodes on one side of supercell # are connected to nodes on opposite side v_expec_frac = new_structure.lattice.get_fractional_coords(v_expec) # find new to_jimage # use np.around to fix issues with finite precision leading to incorrect image v_expec_image = np.around(v_expec_frac, decimals=3) v_expec_image = v_expec_image - v_expec_image % 1 v_expec_frac = np.subtract(v_expec_frac, v_expec_image) v_expec = new_structure.lattice.get_cartesian_coords(v_expec_frac) v_present = kd_tree.query(v_expec) v_present = v_present[1] if v_present[0] <= tol else None if v_present is not None: new_u = u new_v = v_present new_d = d.copy() new_to_jimage = tuple(map(int, v_expec_image)) # normalize direction if new_v < new_u: new_u, new_v = new_v, new_u new_to_jimage = tuple(np.multiply(-1, d['to_jimage']).astype(int)) new_d['to_jimage'] = new_to_jimage edges_to_remove.append((u, v, k)) if (new_u, new_v, new_to_jimage) not in new_periodic_images: edges_to_add.append((new_u, new_v, new_d)) new_periodic_images.append((new_u, new_v, new_to_jimage)) logger.debug("Removing {} edges, adding {} new edges.".format(len(edges_to_remove), len(edges_to_add))) # add/delete marked edges for edges_to_remove in edges_to_remove: new_g.remove_edge(*edges_to_remove) for (u, v, d) in edges_to_add: new_g.add_edge(u, v, **d) # return new instance of StructureGraph with supercell d = {"@module": self.__class__.__module__, "@class": self.__class__.__name__, "structure": new_structure.as_dict(), "graphs": json_graph.adjacency_data(new_g)} sg = StructureGraph.from_dict(d) return sg def __rmul__(self, other): return self.__mul__(other) def _edges_to_string(self, g): header = "from to to_image " header_line = "---- ---- ------------" edge_weight_name = g.graph["edge_weight_name"] if edge_weight_name: print_weights = ["weight"] edge_label = g.graph["edge_weight_name"] edge_weight_units = g.graph["edge_weight_units"] if edge_weight_units: 
edge_label += " ({})".format(edge_weight_units) header += " {}".format(edge_label) header_line += " {}".format("-" * max([18, len(edge_label)])) else: print_weights = False s = header + "\n" + header_line + "\n" edges = list(g.edges(data=True)) # sort edges for consistent ordering edges.sort(key=itemgetter(0, 1)) if print_weights: for u, v, data in edges: s += "{:4} {:4} {:12} {:.3e}\n".format(u, v, str(data.get("to_jimage", (0, 0, 0))), data.get("weight", 0)) else: for u, v, data in edges: s += "{:4} {:4} {:12}\n".format(u, v, str(data.get("to_jimage", (0, 0, 0)))) return s def __str__(self): s = "Structure Graph" s += "\nStructure: \n{}".format(self.structure.__str__()) s += "\nGraph: {}\n".format(self.name) s += self._edges_to_string(self.graph) return s def __repr__(self): s = "Structure Graph" s += "\nStructure: \n{}".format(self.structure.__repr__()) s += "\nGraph: {}\n".format(self.name) s += self._edges_to_string(self.graph) return s def __len__(self): """ :return: length of Structure / number of nodes in graph """ return len(self.structure) def sort(self, key=None, reverse=False): """ Same as Structure.sort(), also remaps nodes in graph. :param key: :param reverse: :return: """ old_structure = self.structure.copy() # sort Structure self.structure._sites = sorted(self.structure._sites, key=key, reverse=reverse) # apply Structure ordering to graph mapping = {idx: self.structure.index(site) for idx, site in enumerate(old_structure)} self.graph = nx.relabel_nodes(self.graph, mapping, copy=True) # normalize directions of edges edges_to_remove = [] edges_to_add = [] for u, v, k, d in self.graph.edges(keys=True, data=True): if v < u: new_v, new_u, new_d = u, v, d.copy() new_d['to_jimage'] = tuple(np.multiply(-1, d['to_jimage']).astype(int)) edges_to_remove.append((u, v, k)) edges_to_add.append((new_u, new_v, new_d)) # add/delete marked edges for edges_to_remove in edges_to_remove: self.graph.remove_edge(*edges_to_remove) for (u, v, d) in edges_to_add: self.graph.add_edge(u, v, **d) def __copy__(self): return StructureGraph.from_dict(self.as_dict()) def __eq__(self, other): """ Two StructureGraphs are equal if they have equal Structures, and have the same edges between Sites. Edge weights can be different and StructureGraphs can still be considered equal. :param other: StructureGraph :return (bool): """ # sort for consistent node indices # PeriodicSite should have a proper __hash__() value, # using its frac_coords as a convenient key mapping = {tuple(site.frac_coords): self.structure.index(site) for site in other.structure} other_sorted = other.__copy__() other_sorted.sort(key=lambda site: mapping[tuple(site.frac_coords)]) edges = {(u, v, d['to_jimage']) for u, v, d in self.graph.edges(keys=False, data=True)} edges_other = {(u, v, d['to_jimage']) for u, v, d in other_sorted.graph.edges(keys=False, data=True)} return (edges == edges_other) and \ (self.structure == other_sorted.structure) def diff(self, other, strict=True): """ Compares two StructureGraphs. Returns dict with keys 'self', 'other', 'both' with edges that are present in only one StructureGraph ('self' and 'other'), and edges that are present in both. The Jaccard distance is a simple measure of the dissimilarity between two StructureGraphs (ignoring edge weights), and is defined by 1 - (size of the intersection / size of the union) of the sets of edges. This is returned with key 'dist'. 
Important note: all node indices are in terms of the StructureGraph this method is called from, not the 'other' StructureGraph: there is no guarantee the node indices will be the same if the underlying Structures are ordered differently. :param other: StructureGraph :param strict: if False, will compare bonds from different Structures, with node indices replaced by Specie strings, will not count number of occurrences of bonds :return: """ if self.structure != other.structure and strict: return ValueError("Meaningless to compare StructureGraphs if " "corresponding Structures are different.") if strict: # sort for consistent node indices # PeriodicSite should have a proper __hash__() value, # using its frac_coords as a convenient key mapping = {tuple(site.frac_coords): self.structure.index(site) for site in other.structure} other_sorted = other.__copy__() other_sorted.sort(key=lambda site: mapping[tuple(site.frac_coords)]) edges = {(u, v, d['to_jimage']) for u, v, d in self.graph.edges(keys=False, data=True)} edges_other = {(u, v, d['to_jimage']) for u, v, d in other_sorted.graph.edges(keys=False, data=True)} else: edges = {(str(self.structure[u].specie), str(self.structure[v].specie)) for u, v, d in self.graph.edges(keys=False, data=True)} edges_other = {(str(other.structure[u].specie), str(other.structure[v].specie)) for u, v, d in other.graph.edges(keys=False, data=True)} if len(edges) == 0 and len(edges_other) == 0: jaccard_dist = 0 # by definition else: jaccard_dist = 1 - len(edges.intersection(edges_other)) / len(edges.union(edges_other)) return { 'self': edges - edges_other, 'other': edges_other - edges, 'both': edges.intersection(edges_other), 'dist': jaccard_dist } def get_subgraphs_as_molecules(self, use_weights=False): """ Retrieve subgraphs as molecules, useful for extracting molecules from periodic crystals. Will only return unique molecules, not any duplicates present in the crystal (a duplicate defined as an isomorphic subgraph). :param use_weights (bool): If True, only treat subgraphs as isomorphic if edges have the same weights. Typically, this means molecules will need to have the same bond lengths to be defined as duplicates, otherwise bond lengths can differ. This is a fairly robust approach, but will treat e.g. enantiomers as being duplicates. 
:return: list of unique Molecules in Structure """ # creating a supercell is an easy way to extract # molecules (and not, e.g., layers of a 2D crystal) # without adding extra logic if getattr(self, '_supercell_sg', None) is None: self._supercell_sg = supercell_sg = self * (3, 3, 3) # make undirected to find connected subgraphs supercell_sg.graph = nx.Graph(supercell_sg.graph) # find subgraphs all_subgraphs = [supercell_sg.graph.subgraph(c) for c in nx.connected_components(supercell_sg.graph)] # discount subgraphs that lie across *supercell* boundaries # these will be subgraphs representing crystals molecule_subgraphs = [] for subgraph in all_subgraphs: intersects_boundary = any([d['to_jimage'] != (0, 0, 0) for u, v, d in subgraph.edges(data=True)]) if not intersects_boundary: molecule_subgraphs.append(nx.MultiDiGraph(subgraph)) # add specie names to graph to be able to test for isomorphism for subgraph in molecule_subgraphs: for n in subgraph: subgraph.add_node(n, specie=str(supercell_sg.structure[n].specie)) # now define how we test for isomorphism def node_match(n1, n2): return n1['specie'] == n2['specie'] def edge_match(e1, e2): if use_weights: return e1['weight'] == e2['weight'] else: return True # prune duplicate subgraphs unique_subgraphs = [] for subgraph in molecule_subgraphs: already_present = [nx.is_isomorphic(subgraph, g, node_match=node_match, edge_match=edge_match) for g in unique_subgraphs] if not any(already_present): unique_subgraphs.append(subgraph) # get Molecule objects for each subgraph molecules = [] for subgraph in unique_subgraphs: coords = [supercell_sg.structure[n].coords for n in subgraph.nodes()] species = [supercell_sg.structure[n].specie for n in subgraph.nodes()] molecule = Molecule(species, coords) # shift so origin is at center of mass molecule = molecule.get_centered_molecule() molecules.append(molecule) return molecules class MolGraphSplitError(Exception): """ Raised when a molecule graph fails to split into two disconnected subgraphs """ pass class MoleculeGraph(MSONable): """ This is a class for annotating a Molecule with bond information, stored in the form of a graph. A "bond" does not necessarily have to be a chemical bond, but can store any kind of information that connects two Sites. """ def __init__(self, molecule, graph_data=None): """ If constructing this class manually, use the `with_empty_graph` method or `with_local_env_strategy` method (using an algorithm provided by the `local_env` module, such as O'Keeffe). This class contains connection information: relationships between sites represented by a Graph structure, and an associated molecule object. This class uses the NetworkX package to store and operate on the graph itself, but contains a lot of helper methods to make associating a graph with a given molecule easier. Use cases for this include storing bonding information, NMR J-couplings, Heisenberg exchange parameters, etc.
:param molecule: Molecule object :param graph_data: dict containing graph information in dict format (not intended to be constructed manually, see as_dict method for format) """ if isinstance(molecule, MoleculeGraph): # just make a copy from input graph_data = molecule.as_dict()['graphs'] self.molecule = molecule self.graph = nx.readwrite.json_graph.adjacency_graph(graph_data) # tidy up edge attr dicts, reading to/from json duplicates # information for u, v, k, d in self.graph.edges(keys=True, data=True): if 'id' in d: del d['id'] if 'key' in d: del d['key'] # ensure images are tuples (conversion to lists happens # when serializing back from json), it's important images # are hashable/immutable if 'to_jimage' in d: d['to_jimage'] = tuple(d['to_jimage']) if 'from_jimage' in d: d['from_jimage'] = tuple(d['from_jimage']) self.set_node_attributes() @classmethod def with_empty_graph(cls, molecule, name="bonds", edge_weight_name=None, edge_weight_units=None): """ Constructor for MoleculeGraph, returns a MoleculeGraph object with an empty graph (no edges, only nodes defined that correspond to Sites in Molecule). :param molecule (Molecule): :param name (str): name of graph, e.g. "bonds" :param edge_weight_name (str): name of edge weights, e.g. "bond_length" or "exchange_constant" :param edge_weight_units (str): name of edge weight units e.g. "Å" or "eV" :return (MoleculeGraph): """ if edge_weight_name and (edge_weight_units is None): raise ValueError("Please specify units associated " "with your edge weights. Can be " "empty string if arbitrary or " "dimensionless.") # construct graph with one node per site # graph attributes don't change behavior of graph, # they're just for book-keeping graph = nx.MultiDiGraph(edge_weight_name=edge_weight_name, edge_weight_units=edge_weight_units, name=name) graph.add_nodes_from(range(len(molecule))) graph_data = json_graph.adjacency_data(graph) return cls(molecule, graph_data=graph_data) @staticmethod def with_edges(molecule, edges): """ Constructor for MoleculeGraph, using pre-existing or pre-defined edges with optional edge parameters. :param molecule: Molecule object :param edges: dict representing the bonds of the functional group (format: {(u, v): props}, where props is a dictionary of properties, including weight. Props should be None if no additional properties are to be specified. :return: mg, a MoleculeGraph """ mg = MoleculeGraph.with_empty_graph(molecule, name="bonds", edge_weight_name="weight", edge_weight_units="") for edge, props in edges.items(): try: from_index = edge[0] to_index = edge[1] except TypeError: raise ValueError("Edges must be given as (from_index, to_index)" "tuples") if props is not None: if "weight" in props.keys(): weight = props["weight"] del props["weight"] else: weight = None if len(props.items()) == 0: props = None else: weight = None nodes = mg.graph.nodes if not (from_index in nodes and to_index in nodes): raise ValueError("Edges cannot be added if nodes are not" " present in the graph. Please check your" " indices.") mg.add_edge(from_index, to_index, weight=weight, edge_properties=props) mg.set_node_attributes() return mg @staticmethod def with_local_env_strategy(molecule, strategy, reorder=True, extend_structure=True): """ Constructor for MoleculeGraph, using a strategy from :Class: `pymatgen.analysis.local_env`. 
:param molecule: Molecule object :param strategy: an instance of a :Class: `pymatgen.analysis.local_env.NearNeighbors` object :param reorder: bool, representing if graph nodes need to be reordered following the application of the local_env strategy :param extend_structure: If True (default), then a large artificial box will be placed around the Molecule, because some strategies assume periodic boundary conditions. :return: mg, a MoleculeGraph """ mg = MoleculeGraph.with_empty_graph(molecule, name="bonds", edge_weight_name="weight", edge_weight_units="") # NearNeighbor classes only (generally) work with structures # molecules have to be boxed first coords = molecule.cart_coords if extend_structure: a = max(coords[:, 0]) - min(coords[:, 0]) + 100 b = max(coords[:, 1]) - min(coords[:, 1]) + 100 c = max(coords[:, 2]) - min(coords[:, 2]) + 100 molecule = molecule.get_boxed_structure(a, b, c, no_cross=True) for n in range(len(molecule)): neighbors = strategy.get_nn_info(molecule, n) for neighbor in neighbors: # all bonds in molecules should not cross # (artificial) periodic boundaries if not np.array_equal(neighbor['image'], [0, 0, 0]): continue # local_env will always try to add two edges # for any one bond, one from site u to site v # and another form site v to site u: this is # harmless, so warn_duplicates=False mg.add_edge(from_index=n, to_index=neighbor['site_index'], weight=neighbor['weight'], warn_duplicates=False) if reorder: # Reverse order of nodes to match with molecule n = len(mg.molecule) mapping = {i: (n - i) for i in range(n)} mapping = {i: (j - 1) for i, j in mapping.items()} mg.graph = nx.relabel_nodes(mg.graph, mapping) duplicates = [] for edge in mg.graph.edges: if edge[2] != 0: duplicates.append(edge) for duplicate in duplicates: mg.graph.remove_edge(duplicate[0], duplicate[1], key=duplicate[2]) mg.set_node_attributes() return mg @property def name(self): """ :return: Name of graph """ return self.graph.graph['name'] @property def edge_weight_name(self): """ :return: Name of the edge weight property of graph """ return self.graph.graph['edge_weight_name'] @property def edge_weight_unit(self): """ :return: Units of the edge weight property of graph """ return self.graph.graph['edge_weight_units'] def add_edge(self, from_index, to_index, weight=None, warn_duplicates=True, edge_properties=None): """ Add edge to graph. Since physically a 'bond' (or other connection between sites) doesn't have a direction, from_index, from_jimage can be swapped with to_index, to_jimage. However, images will always always be shifted so that from_index < to_index and from_jimage becomes (0, 0, 0). :param from_index: index of site connecting from :param to_index: index of site connecting to :param weight (float): e.g. 
bond length :param warn_duplicates (bool): if True, will warn if trying to add duplicate edges (duplicate edges will not be added in either case) :param edge_properties (dict): any other information to store on graph edges, similar to Structure's site_properties :return: """ # this is not necessary for the class to work, but # just makes it neater if to_index < from_index: to_index, from_index = from_index, to_index # sanitize types from_index, to_index = int(from_index), int(to_index) # check we're not trying to add a duplicate edge # there should only ever be at most one edge # between two sites existing_edge_data = self.graph.get_edge_data(from_index, to_index) if existing_edge_data and warn_duplicates: warnings.warn("Trying to add an edge that already exists from " "site {} to site {}.".format(from_index, to_index)) return # generic container for additional edge properties, # similar to site properties edge_properties = edge_properties or {} if weight: self.graph.add_edge(from_index, to_index, weight=weight, **edge_properties) else: self.graph.add_edge(from_index, to_index, **edge_properties) def insert_node(self, i, species, coords, validate_proximity=False, site_properties=None, edges=None): """ A wrapper around Molecule.insert(), which also incorporates the new site into the MoleculeGraph. :param i: Index at which to insert the new site :param species: Species for the new site :param coords: 3x1 array representing coordinates of the new site :param validate_proximity: For Molecule.insert(); if True (default False), distance will be checked to ensure that site can be safely added. :param site_properties: Site properties for Molecule :param edges: List of dicts representing edges to be added to the MoleculeGraph. These edges must include the index of the new site i, and all indices used for these edges should reflect the MoleculeGraph AFTER the insertion, NOT before. Each dict should at least have a "to_index" and "from_index" key, and can also have a "weight" and a "properties" key. :return: """ self.molecule.insert(i, species, coords, validate_proximity=validate_proximity, properties=site_properties) mapping = {} for j in range(len(self.molecule) - 1): if j < i: mapping[j] = j else: mapping[j] = j + 1 nx.relabel_nodes(self.graph, mapping, copy=False) self.graph.add_node(i) self.set_node_attributes() if edges is not None: for edge in edges: try: self.add_edge(edge["from_index"], edge["to_index"], weight=edge.get("weight", None), edge_properties=edge.get("properties", None)) except KeyError: raise RuntimeError("Some edges are invalid.") def set_node_attributes(self): """ Replicates molecule site properties (specie, coords, etc.) in the MoleculeGraph. :return: """ species = {} coords = {} properties = {} for node in self.graph.nodes(): species[node] = self.molecule[node].specie.symbol coords[node] = self.molecule[node].coords properties[node] = self.molecule[node].properties nx.set_node_attributes(self.graph, species, "specie") nx.set_node_attributes(self.graph, coords, "coords") nx.set_node_attributes(self.graph, properties, "properties") def alter_edge(self, from_index, to_index, new_weight=None, new_edge_properties=None): """ Alters either the weight or the edge_properties of an edge in the MoleculeGraph. :param from_index: int :param to_index: int :param new_weight: alter_edge does not require that weight be altered. As such, by default, this is None. If weight is to be changed, it should be a float. 
:param new_edge_properties: alter_edge does not require that edge_properties be altered. As such, by default, this is None. If any edge properties are to be changed, it should be a dictionary of edge properties to be changed. :return: """ existing_edge = self.graph.get_edge_data(from_index, to_index) # ensure that edge exists before attempting to change it if not existing_edge: raise ValueError("Edge between {} and {} cannot be altered;\ no edge exists between those sites.".format( from_index, to_index )) # Third index should always be 0 because there should only be one edge between any two nodes if new_weight is not None: self.graph[from_index][to_index][0]['weight'] = new_weight if new_edge_properties is not None: for prop in list(new_edge_properties.keys()): self.graph[from_index][to_index][0][prop] = new_edge_properties[prop] def break_edge(self, from_index, to_index, allow_reverse=False): """ Remove an edge from the MoleculeGraph :param from_index: int :param to_index: int :param allow_reverse: If allow_reverse is True, then break_edge will attempt to break both (from_index, to_index) and, failing that, will attempt to break (to_index, from_index). :return: """ # ensure that edge exists before attempting to remove it existing_edge = self.graph.get_edge_data(from_index, to_index) existing_reverse = None if existing_edge: self.graph.remove_edge(from_index, to_index) else: if allow_reverse: existing_reverse = self.graph.get_edge_data(to_index, from_index) if existing_reverse: self.graph.remove_edge(to_index, from_index) else: raise ValueError("Edge cannot be broken between {} and {};\ no edge exists between those sites.".format( from_index, to_index )) def remove_nodes(self, indices): """ A wrapper for Molecule.remove_sites(). :param indices: list of indices in the current Molecule (and graph) to be removed. :return: """ self.molecule.remove_sites(indices) self.graph.remove_nodes_from(indices) mapping = {} for correct, current in enumerate(sorted(self.graph.nodes)): mapping[current] = correct nx.relabel_nodes(self.graph, mapping, copy=False) self.set_node_attributes() def split_molecule_subgraphs(self, bonds, allow_reverse=False, alterations=None): """ Split MoleculeGraph into two or more MoleculeGraphs by breaking a set of bonds. This function uses MoleculeGraph.break_edge repeatedly to create disjoint graphs (two or more separate molecules). This function does not only alter the graph information, but also changes the underlying Moledules. If the bonds parameter does not include sufficient bonds to separate two molecule fragments, then this function will fail. Currently, this function naively assigns the charge of the total molecule to a single submolecule. A later effort will be to actually accurately assign charge. NOTE: This function does not modify the original MoleculeGraph. It creates a copy, modifies that, and returns two or more new MoleculeGraph objects. :param bonds: list of tuples (from_index, to_index) representing bonds to be broken to split the MoleculeGraph. :param alterations: a dict {(from_index, to_index): alt}, where alt is a dictionary including weight and/or edge properties to be changed following the split. :param allow_reverse: If allow_reverse is True, then break_edge will attempt to break both (from_index, to_index) and, failing that, will attempt to break (to_index, from_index). 
:return: list of MoleculeGraphs """ self.set_node_attributes() original = copy.deepcopy(self) for bond in bonds: original.break_edge(bond[0], bond[1], allow_reverse=allow_reverse) if nx.is_weakly_connected(original.graph): raise MolGraphSplitError("Cannot split molecule; \ MoleculeGraph is still connected.") else: # alter any bonds before partition, to avoid remapping if alterations is not None: for (u, v) in alterations.keys(): if "weight" in alterations[(u, v)]: weight = alterations[(u, v)]["weight"] del alterations[(u, v)]["weight"] edge_properties = alterations[(u, v)] \ if len(alterations[(u, v)]) != 0 else None original.alter_edge(u, v, new_weight=weight, new_edge_properties=edge_properties) else: original.alter_edge(u, v, new_edge_properties=alterations[(u, v)]) sub_mols = [] # Had to use nx.weakly_connected_components because of deprecation # of nx.weakly_connected_component_subgraphs subgraphs = [original.graph.subgraph(c) for c in nx.weakly_connected_components(original.graph)] for subg in subgraphs: nodes = sorted(list(subg.nodes)) # Molecule indices are essentially list-based, so node indices # must be remapped, incrementing from 0 mapping = {} for i in range(len(nodes)): mapping[nodes[i]] = i # just give charge to whatever subgraph has node with index 0 # TODO: actually figure out how to distribute charge if 0 in nodes: charge = self.molecule.charge else: charge = 0 # relabel nodes in graph to match mapping new_graph = nx.relabel_nodes(subg, mapping) species = nx.get_node_attributes(new_graph, "specie") coords = nx.get_node_attributes(new_graph, "coords") raw_props = nx.get_node_attributes(new_graph, "properties") properties = {} for prop_set in raw_props.values(): for prop in prop_set.keys(): if prop in properties: properties[prop].append(prop_set[prop]) else: properties[prop] = [prop_set[prop]] # Site properties must be present for all atoms in the molecule # in order to be used for Molecule instantiation for k, v in properties.items(): if len(v) != len(species): del properties[k] new_mol = Molecule(species, coords, charge=charge, site_properties=properties) graph_data = json_graph.adjacency_data(new_graph) # create new MoleculeGraph sub_mols.append(MoleculeGraph(new_mol, graph_data=graph_data)) return sub_mols def build_unique_fragments(self): """ Find all possible fragment combinations of the MoleculeGraphs (in other words, all connected induced subgraphs) :return: """ self.set_node_attributes() graph = self.graph.to_undirected() # find all possible fragments, aka connected induced subgraphs frag_dict = {} for ii in range(1, len(self.molecule)): for combination in combinations(graph.nodes, ii): mycomp = [] for idx in combination: mycomp.append(str(self.molecule[idx].specie)) mycomp = "".join(sorted(mycomp)) subgraph = nx.subgraph(graph, combination) if nx.is_connected(subgraph): mykey = mycomp + str(len(subgraph.edges())) if mykey not in frag_dict: frag_dict[mykey] = [copy.deepcopy(subgraph)] else: frag_dict[mykey].append(copy.deepcopy(subgraph)) # narrow to all unique fragments using graph isomorphism unique_frag_dict = {} for key in frag_dict: unique_frags = [] for frag in frag_dict[key]: found = False for f in unique_frags: if _isomorphic(frag, f): found = True break if not found: unique_frags.append(frag) unique_frag_dict[key] = copy.deepcopy(unique_frags) # convert back to molecule graphs unique_mol_graph_dict = {} for key in unique_frag_dict: unique_mol_graph_list = [] for fragment in unique_frag_dict[key]: mapping = {e: i for i, e in 
enumerate(sorted(fragment.nodes))} remapped = nx.relabel_nodes(fragment, mapping) species = nx.get_node_attributes(remapped, "specie") coords = nx.get_node_attributes(remapped, "coords") edges = {} for from_index, to_index, key in remapped.edges: edge_props = fragment.get_edge_data(from_index, to_index, key=key) edges[(from_index, to_index)] = edge_props unique_mol_graph_list.append(self.with_edges(Molecule(species=species, coords=coords, charge=self.molecule.charge), edges)) frag_key = str(unique_mol_graph_list[0].molecule.composition.alphabetical_formula) + " E" + str( len(unique_mol_graph_list[0].graph.edges())) unique_mol_graph_dict[frag_key] = copy.deepcopy(unique_mol_graph_list) return unique_mol_graph_dict def substitute_group(self, index, func_grp, strategy, bond_order=1, graph_dict=None, strategy_params=None, reorder=True, extend_structure=True): """ Builds off of Molecule.substitute to replace an atom in self.molecule with a functional group. This method also amends self.graph to incorporate the new functional group. NOTE: using a MoleculeGraph will generally produce a different graph compared with using a Molecule or str (when not using graph_dict). This is because of the reordering that occurs when using some of the local_env strategies. :param index: Index of atom to substitute. :param func_grp: Substituent molecule. There are three options: 1. Providing an actual molecule as the input. The first atom must be a DummySpecie X, indicating the position of nearest neighbor. The second atom must be the next nearest atom. For example, for a methyl group substitution, func_grp should be X-CH3, where X is the first site and C is the second site. What the code will do is to remove the index site, and connect the nearest neighbor to the C atom in CH3. The X-C bond indicates the directionality to connect the atoms. 2. A string name. The molecule will be obtained from the relevant template in func_groups.json. 3. A MoleculeGraph object. :param strategy: Class from pymatgen.analysis.local_env. :param bond_order: A specified bond order to calculate the bond length between the attached functional group and the nearest neighbor site. Defaults to 1. :param graph_dict: Dictionary representing the bonds of the functional group (format: {(u, v): props}, where props is a dictionary of properties, including weight. If None, then the algorithm will attempt to automatically determine bonds using one of a list of strategies defined in pymatgen.analysis.local_env. :param strategy_params: dictionary of keyword arguments for strategy. If None, default parameters will be used. :param reorder: bool, representing if graph nodes need to be reordered following the application of the local_env strategy :param extend_structure: If True (default), then a large artificial box will be placed around the Molecule, because some strategies assume periodic boundary conditions. 
:return: """ def map_indices(grp): grp_map = {} # Get indices now occupied by functional group # Subtracting 1 because the dummy atom X should not count atoms = len(grp) - 1 offset = len(self.molecule) - atoms for i in range(atoms): grp_map[i] = i + offset return grp_map # Work is simplified if a graph is already in place if isinstance(func_grp, MoleculeGraph): self.molecule.substitute(index, func_grp.molecule, bond_order=bond_order) mapping = map_indices(func_grp.molecule) for (u, v) in list(func_grp.graph.edges()): edge_props = func_grp.graph.get_edge_data(u, v)[0] weight = None if "weight" in edge_props.keys(): weight = edge_props["weight"] del edge_props["weight"] self.add_edge(mapping[u], mapping[v], weight=weight, edge_properties=edge_props) else: if isinstance(func_grp, Molecule): func_grp = copy.deepcopy(func_grp) else: try: func_grp = copy.deepcopy(FunctionalGroups[func_grp]) except Exception: raise RuntimeError("Can't find functional group in list. " "Provide explicit coordinate instead") self.molecule.substitute(index, func_grp, bond_order=bond_order) mapping = map_indices(func_grp) # Remove dummy atom "X" func_grp.remove_species("X") if graph_dict is not None: for (u, v) in graph_dict.keys(): edge_props = graph_dict[(u, v)] if "weight" in edge_props.keys(): weight = edge_props["weight"] del edge_props["weight"] self.add_edge(mapping[u], mapping[v], weight=weight, edge_properties=edge_props) else: if strategy_params is None: strategy_params = {} strat = strategy(**strategy_params) graph = self.with_local_env_strategy(func_grp, strat, reorder=reorder, extend_structure=extend_structure) for (u, v) in list(graph.graph.edges()): edge_props = graph.graph.get_edge_data(u, v)[0] weight = None if "weight" in edge_props.keys(): weight = edge_props["weight"] del edge_props["weight"] if 0 not in list(graph.graph.nodes()): # If graph indices have different indexing u, v = (u - 1), (v - 1) self.add_edge(mapping[u], mapping[v], weight=weight, edge_properties=edge_props) def replace_group(self, index, func_grp, strategy, bond_order=1, graph_dict=None, strategy_params=None, reorder=True, extend_structure=True): """ Builds off of Molecule.substitute and MoleculeGraph.substitute_group to replace a functional group in self.molecule with a functional group. This method also amends self.graph to incorporate the new functional group. TODO: Figure out how to replace into a ring structure. :param index: Index of atom to substitute. :param func_grp: Substituent molecule. There are three options: 1. Providing an actual molecule as the input. The first atom must be a DummySpecie X, indicating the position of nearest neighbor. The second atom must be the next nearest atom. For example, for a methyl group substitution, func_grp should be X-CH3, where X is the first site and C is the second site. What the code will do is to remove the index site, and connect the nearest neighbor to the C atom in CH3. The X-C bond indicates the directionality to connect the atoms. 2. A string name. The molecule will be obtained from the relevant template in func_groups.json. 3. A MoleculeGraph object. :param strategy: Class from pymatgen.analysis.local_env. :param bond_order: A specified bond order to calculate the bond length between the attached functional group and the nearest neighbor site. Defaults to 1. :param graph_dict: Dictionary representing the bonds of the functional group (format: {(u, v): props}, where props is a dictionary of properties, including weight. 
If None, then the algorithm will attempt to automatically determine bonds using one of a list of strategies defined in pymatgen.analysis.local_env. :param strategy_params: dictionary of keyword arguments for strategy. If None, default parameters will be used. :param reorder: bool, representing if graph nodes need to be reordered following the application of the local_env strategy :param extend_structure: If True (default), then a large artificial box will be placed around the Molecule, because some strategies assume periodic boundary conditions. :return: """ self.set_node_attributes() neighbors = self.get_connected_sites(index) # If the atom at index is terminal if len(neighbors) == 1: self.substitute_group(index, func_grp, strategy, bond_order=bond_order, graph_dict=graph_dict, strategy_params=strategy_params, reorder=reorder, extend_structure=extend_structure) else: rings = self.find_rings(including=[index]) if len(rings) != 0: raise RuntimeError("Currently functional group replacement" "cannot occur at an atom within a ring" "structure.") to_remove = set() sizes = dict() disconnected = self.graph.to_undirected() disconnected.remove_node(index) for neighbor in neighbors: sizes[neighbor[2]] = len(nx.descendants(disconnected, neighbor[2])) keep = max(sizes, key=lambda x: sizes[x]) for i in sizes.keys(): if i != keep: to_remove.add(i) self.remove_nodes(list(to_remove)) self.substitute_group(index, func_grp, strategy, bond_order=bond_order, graph_dict=graph_dict, strategy_params=strategy_params, reorder=reorder, extend_structure=extend_structure) def find_rings(self, including=None): """ Find ring structures in the MoleculeGraph. :param including: list of site indices. If including is not None, then find_rings will only return those rings including the specified sites. By default, this parameter is None, and all rings will be returned. :return: dict {index:cycle}. Each entry will be a ring (cycle, in graph theory terms) including the index found in the Molecule. If there is no cycle including an index, the value will be an empty list. """ # Copies self.graph such that all edges (u, v) matched by edges (v, u) undirected = self.graph.to_undirected() directed = undirected.to_directed() cycles_nodes = [] cycles_edges = [] # Remove all two-edge cycles all_cycles = [c for c in nx.simple_cycles(directed) if len(c) > 2] # Using to_directed() will mean that each cycle always appears twice # So, we must also remove duplicates unique_sorted = [] unique_cycles = [] for cycle in all_cycles: if sorted(cycle) not in unique_sorted: unique_sorted.append(sorted(cycle)) unique_cycles.append(cycle) if including is None: cycles_nodes = unique_cycles else: for i in including: for cycle in unique_cycles: if i in cycle and cycle not in cycles_nodes: cycles_nodes.append(cycle) for cycle in cycles_nodes: edges = [] for i, e in enumerate(cycle): edges.append((cycle[i - 1], e)) cycles_edges.append(edges) return cycles_edges def get_connected_sites(self, n): """ Returns a named tuple of neighbors of site n: periodic_site, jimage, index, weight. Index is the index of the corresponding site in the original structure, weight can be None if not defined. 
:param n: index of Site in Molecule :param jimage: lattice vector of site :return: list of ConnectedSite tuples, sorted by closest first """ connected_sites = set() out_edges = [(u, v, d) for u, v, d in self.graph.out_edges(n, data=True)] in_edges = [(u, v, d) for u, v, d in self.graph.in_edges(n, data=True)] for u, v, d in out_edges + in_edges: weight = d.get('weight', None) if v == n: site = self.molecule[u] dist = self.molecule[v].distance(self.molecule[u]) connected_site = ConnectedSite(site=site, jimage=(0, 0, 0), index=u, weight=weight, dist=dist) else: site = self.molecule[v] dist = self.molecule[u].distance(self.molecule[v]) connected_site = ConnectedSite(site=site, jimage=(0, 0, 0), index=v, weight=weight, dist=dist) connected_sites.add(connected_site) # return list sorted by closest sites first connected_sites = list(connected_sites) connected_sites.sort(key=lambda x: x.dist) return connected_sites def get_coordination_of_site(self, n): """ Returns the number of neighbors of site n. In graph terms, simply returns degree of node corresponding to site n. :param n: index of site :return (int): """ number_of_self_loops = sum([1 for n, v in self.graph.edges(n) if n == v]) return self.graph.degree(n) - number_of_self_loops def draw_graph_to_file(self, filename="graph", diff=None, hide_unconnected_nodes=False, hide_image_edges=True, edge_colors=False, node_labels=False, weight_labels=False, image_labels=False, color_scheme="VESTA", keep_dot=False, algo="fdp"): """ Draws graph using GraphViz. The networkx graph object itself can also be drawn with networkx's in-built graph drawing methods, but note that this might give misleading results for multigraphs (edges are super-imposed on each other). If visualization is difficult to interpret, `hide_image_edges` can help, especially in larger graphs. :param filename: filename to output, will detect filetype from extension (any graphviz filetype supported, such as pdf or png) :param diff (StructureGraph): an additional graph to compare with, will color edges red that do not exist in diff and edges green that are in diff graph but not in the reference graph :param hide_unconnected_nodes: if True, hide unconnected nodes :param hide_image_edges: if True, do not draw edges that go through periodic boundaries :param edge_colors (bool): if True, use node colors to color edges :param node_labels (bool): if True, label nodes with species and site index :param weight_labels (bool): if True, label edges with weights :param image_labels (bool): if True, label edges with their periodic images (usually only used for debugging, edges to periodic images always appear as dashed lines) :param color_scheme (str): "VESTA" or "JMOL" :param keep_dot (bool): keep GraphViz .dot file for later visualization :param algo: any graphviz algo, "neato" (for simple graphs) or "fdp" (for more crowded graphs) usually give good outputs :return: """ if not which(algo): raise RuntimeError("StructureGraph graph drawing requires " "GraphViz binaries to be in the path.") # Developer note: NetworkX also has methods for drawing # graphs using matplotlib, these also work here. However, # a dedicated tool like GraphViz allows for much easier # control over graph appearance and also correctly displays # mutli-graphs (matplotlib can superimpose multiple edges). 
g = self.graph.copy() g.graph = {'nodesep': 10.0, 'dpi': 300, 'overlap': "false"} # add display options for nodes for n in g.nodes(): # get label by species name label = "{}({})".format(str(self.molecule[n].specie), n) if node_labels else "" # use standard color scheme for nodes c = EL_COLORS[color_scheme].get(str(self.molecule[n].specie.symbol), [0, 0, 0]) # get contrasting font color # magic numbers account for perceived luminescence # https://stackoverflow.com/questions/1855884/determine-font-color-based-on-background-color fontcolor = '#000000' if 1 - (c[0] * 0.299 + c[1] * 0.587 + c[2] * 0.114) / 255 < 0.5 else '#ffffff' # convert color to hex string color = "#{:02x}{:02x}{:02x}".format(c[0], c[1], c[2]) g.add_node(n, fillcolor=color, fontcolor=fontcolor, label=label, fontname="Helvetica-bold", style="filled", shape="circle") edges_to_delete = [] # add display options for edges for u, v, k, d in g.edges(keys=True, data=True): # retrieve from/to images, set as origin if not defined if "to_image" in d: to_image = d['to_jimage'] else: to_image = (0, 0, 0) # set edge style d['style'] = "solid" if to_image != (0, 0, 0): d['style'] = "dashed" if hide_image_edges: edges_to_delete.append((u, v, k)) # don't show edge directions d['arrowhead'] = "none" # only add labels for images that are not the origin if image_labels: d['headlabel'] = "" if to_image == (0, 0, 0) else "to {}".format((to_image)) d['arrowhead'] = "normal" if d['headlabel'] else "none" # optionally color edges using node colors color_u = g.node[u]['fillcolor'] color_v = g.node[v]['fillcolor'] d['color_uv'] = "{};0.5:{};0.5".format(color_u, color_v) if edge_colors else "#000000" # optionally add weights to graph if weight_labels: units = g.graph.get('edge_weight_units', "") if d.get('weight'): d['label'] = "{:.2f} {}".format(d['weight'], units) # update edge with our new style attributes g.edges[u, v, k].update(d) # optionally remove periodic image edges, # these can be confusing due to periodic boundaries if hide_image_edges: for edge_to_delete in edges_to_delete: g.remove_edge(*edge_to_delete) # optionally hide unconnected nodes, # these can appear when removing periodic edges if hide_unconnected_nodes: g = g.subgraph([n for n in g.degree() if g.degree()[n] != 0]) # optionally highlight differences with another graph if diff: diff = self.diff(diff, strict=True) green_edges = [] red_edges = [] for u, v, k, d in g.edges(keys=True, data=True): if (u, v, d['to_jimage']) in diff['self']: # edge has been deleted red_edges.append((u, v, k)) elif (u, v, d['to_jimage']) in diff['other']: # edge has been added green_edges.append((u, v, k)) for u, v, k in green_edges: g.edges[u, v, k].update({'color_uv': '#00ff00'}) for u, v, k in red_edges: g.edges[u, v, k].update({'color_uv': '#ff0000'}) basename, extension = os.path.splitext(filename) extension = extension[1:] write_dot(g, basename + ".dot") with open(filename, "w") as f: args = [algo, "-T", extension, basename + ".dot"] rs = subprocess.Popen(args, stdout=f, stdin=subprocess.PIPE, close_fds=True) rs.communicate() if rs.returncode != 0: raise RuntimeError("{} exited with return code {}.".format(algo, rs.returncode)) if not keep_dot: os.remove(basename + ".dot") def as_dict(self): """ As in :Class: `pymatgen.core.Molecule` except with using `to_dict_of_dicts` from NetworkX to store graph information. 
""" d = {"@module": self.__class__.__module__, "@class": self.__class__.__name__, "molecule": self.molecule.as_dict(), "graphs": json_graph.adjacency_data(self.graph)} return d @classmethod def from_dict(cls, d): """ As in :Class: `pymatgen.core.Molecule` except restoring graphs using `from_dict_of_dicts` from NetworkX to restore graph information. """ m = Molecule.from_dict(d['molecule']) return cls(m, d['graphs']) def _edges_to_string(self, g): header = "from to to_image " header_line = "---- ---- ------------" edge_weight_name = g.graph["edge_weight_name"] if edge_weight_name: print_weights = ["weight"] edge_label = g.graph["edge_weight_name"] edge_weight_units = g.graph["edge_weight_units"] if edge_weight_units: edge_label += " ({})".format(edge_weight_units) header += " {}".format(edge_label) header_line += " {}".format("-" * max([18, len(edge_label)])) else: print_weights = False s = header + "\n" + header_line + "\n" edges = list(g.edges(data=True)) # sort edges for consistent ordering edges.sort(key=itemgetter(0, 1)) if print_weights: for u, v, data in edges: s += "{:4} {:4} {:12} {:.3e}\n".format(u, v, str(data.get("to_jimage", (0, 0, 0))), data.get("weight", 0)) else: for u, v, data in edges: s += "{:4} {:4} {:12}\n".format(u, v, str(data.get("to_jimage", (0, 0, 0)))) return s def __str__(self): s = "Molecule Graph" s += "\nMolecule: \n{}".format(self.molecule.__str__()) s += "\nGraph: {}\n".format(self.name) s += self._edges_to_string(self.graph) return s def __repr__(self): s = "Molecule Graph" s += "\nMolecule: \n{}".format(self.molecule.__repr__()) s += "\nGraph: {}\n".format(self.name) s += self._edges_to_string(self.graph) return s def __len__(self): """ :return: length of Molecule / number of nodes in graph """ return len(self.molecule) def sort(self, key=None, reverse=False): """ Same as Molecule.sort(), also remaps nodes in graph. :param key: :param reverse: :return: """ old_molecule = self.molecule.copy() # sort Molecule self.molecule._sites = sorted(self.molecule._sites, key=key, reverse=reverse) # apply Molecule ordering to graph mapping = {idx: self.molecule.index(site) for idx, site in enumerate(old_molecule)} self.graph = nx.relabel_nodes(self.graph, mapping, copy=True) # normalize directions of edges edges_to_remove = [] edges_to_add = [] for u, v, k, d in self.graph.edges(keys=True, data=True): if v < u: new_v, new_u, new_d = u, v, d.copy() new_d['to_jimage'] = (0, 0, 0) edges_to_remove.append((u, v, k)) edges_to_add.append((new_u, new_v, new_d)) # add/delete marked edges for edges_to_remove in edges_to_remove: self.graph.remove_edge(*edges_to_remove) for (u, v, d) in edges_to_add: self.graph.add_edge(u, v, **d) def __copy__(self): return MoleculeGraph.from_dict(self.as_dict()) def __eq__(self, other): """ Two MoleculeGraphs are equal if they have equal Molecules, and have the same edges between Sites. Edge weights can be different and MoleculeGraphs can still be considered equal. 
:param other: MoleculeGraph :return (bool): """ # sort for consistent node indices # Site should have a proper __hash__() value, # using its coords as a convenient key try: mapping = {tuple(site.coords): self.molecule.index(site) for site in other.molecule} except ValueError: return False other_sorted = other.__copy__() other_sorted.sort(key=lambda site: mapping[tuple(site.coords)]) edges = {(u, v) for u, v, d in self.graph.edges(keys=False, data=True)} edges_other = {(u, v) for u, v, d in other_sorted.graph.edges(keys=False, data=True)} return (edges == edges_other) and \ (self.molecule == other_sorted.molecule) def isomorphic_to(self, other): """ Checks if the graphs of two MoleculeGraphs are isomorphic to one another. In order to prevent problems with misdirected edges, both graphs are converted into undirected nx.Graph objects. :param other: MoleculeGraph object to be compared. :return: bool """ if len(self.molecule) != len(other.molecule): return False elif self.molecule.composition.alphabetical_formula != other.molecule.composition.alphabetical_formula: return False elif len(self.graph.edges()) != len(other.graph.edges()): return False else: return _isomorphic(self.graph, other.graph) def diff(self, other, strict=True): """ Compares two MoleculeGraphs. Returns dict with keys 'self', 'other', 'both' with edges that are present in only one MoleculeGraph ('self' and 'other'), and edges that are present in both. The Jaccard distance is a simple measure of the dissimilarity between two MoleculeGraphs (ignoring edge weights), and is defined by 1 - (size of the intersection / size of the union) of the sets of edges. This is returned with key 'dist'. Important note: all node indices are in terms of the MoleculeGraph this method is called from, not the 'other' MoleculeGraph: there is no guarantee the node indices will be the same if the underlying Molecules are ordered differently. :param other: MoleculeGraph :param strict: if False, will compare bonds from different Molecules, with node indices replaced by Specie strings, will not count number of occurrences of bonds :return: """ if self.molecule != other.molecule and strict: return ValueError("Meaningless to compare MoleculeGraphs if " "corresponding Molecules are different.") if strict: # sort for consistent node indices # Site should have a proper __hash__() value, # using its coords as a convenient key mapping = {tuple(site.coords): self.molecule.index(site) for site in other.molecule} other_sorted = other.__copy__() other_sorted.sort(key=lambda site: mapping[tuple(site.coords)]) edges = {(u, v, d.get('to_jimage', (0, 0, 0))) for u, v, d in self.graph.edges(keys=False, data=True)} edges_other = {(u, v, d.get('to_jimage', (0, 0, 0))) for u, v, d in other_sorted.graph.edges(keys=False, data=True)} else: edges = {(str(self.molecule[u].specie), str(self.molecule[v].specie)) for u, v, d in self.graph.edges(keys=False, data=True)} edges_other = {(str(other.molecule[u].specie), str(other.molecule[v].specie)) for u, v, d in other.graph.edges(keys=False, data=True)} if len(edges) == 0 and len(edges_other) == 0: jaccard_dist = 0 # by definition else: jaccard_dist = 1 - len(edges.intersection(edges_other)) / len(edges.union(edges_other)) return { 'self': edges - edges_other, 'other': edges_other - edges, 'both': edges.intersection(edges_other), 'dist': jaccard_dist }
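# A minimal usage sketch, assuming a toy water-like geometry; it exercises
# with_edges, get_connected_sites, isomorphic_to and diff as defined above.
# The coordinates and the guarded demo block are illustrative assumptions,
# not part of the library API beyond the methods shown.
if __name__ == "__main__":
    from pymatgen.core.structure import Molecule

    water = Molecule(["O", "H", "H"],
                     [[0.00, 0.00, 0.00],
                      [0.76, 0.59, 0.00],
                      [-0.76, 0.59, 0.00]])

    # bonds are given as {(from_index, to_index): props_or_None}
    mg = MoleculeGraph.with_edges(water, {(0, 1): None, (0, 2): None})

    assert len(mg) == 3                       # one graph node per site
    print(mg.get_connected_sites(0))          # two ConnectedSite tuples for O
    print(mg.isomorphic_to(mg))               # True
    print(mg.diff(mg, strict=True)["dist"])   # 0.0: identical edge sets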
fraricci/pymatgen
pymatgen/analysis/graphs.py
Python
mit
114,612
[ "CRYSTAL", "Jmol", "pymatgen" ]
b0f2b5d669e351abad7e5ab3a1461f0b26d183a50307c129504f3a99d034dba7
"""Test of PBLAS Level 2 & 3 : rk, r2k, gemv, gemm. The test generates random matrices A0, B0, X0, etc. on a 1-by-1 BLACS grid. They are redistributed to a mprocs-by-nprocs BLACS grid, BLAS operations are performed in parallel, and results are compared against BLAS. """ import sys import numpy as np from gpaw.mpi import world, rank from gpaw.test import equal from gpaw.blacs import BlacsGrid, Redistributor, parallelprint from gpaw.utilities import compiled_with_sl from gpaw.utilities.blas import gemm, gemv, r2k, rk from gpaw.utilities.scalapack import pblas_simple_gemm, pblas_simple_gemv, \ pblas_simple_r2k, pblas_simple_rk import _gpaw tol = 4.0e-13 # may need to be be increased if the mprocs-by-nprocs \ # BLACS grid becomes larger def main(M=160, N=120, K=140, seed=42, mprocs=2, nprocs=2, dtype=float): gen = np.random.RandomState(seed) grid = BlacsGrid(world, mprocs, nprocs) if (dtype==complex): epsilon = 1.0j else: epsilon = 0.0 # Create descriptors for matrices on master: globA = grid.new_descriptor(M, K, M, K) globB = grid.new_descriptor(K, N, K, N) globC = grid.new_descriptor(M, N, M, N) globZ = grid.new_descriptor(K, K, K, K) globX = grid.new_descriptor(K, 1, K, 1) globY = grid.new_descriptor(M, 1, M, 1) globD = grid.new_descriptor(M, K, M, K) globS = grid.new_descriptor(M, M, M, M) globU = grid.new_descriptor(M, M, M, M) # print globA.asarray() # Populate matrices local to master: A0 = gen.rand(*globA.shape) + epsilon * gen.rand(*globA.shape) B0 = gen.rand(*globB.shape) + epsilon * gen.rand(*globB.shape) D0 = gen.rand(*globD.shape) + epsilon * gen.rand(*globD.shape) X0 = gen.rand(*globX.shape) + epsilon * gen.rand(*globX.shape) # Local result matrices Y0 = globY.empty(dtype=dtype) C0 = globC.zeros(dtype=dtype) Z0 = globZ.zeros(dtype=dtype) S0 = globS.zeros(dtype=dtype) # zeros needed for rank-updates U0 = globU.zeros(dtype=dtype) # zeros needed for rank-updates # Local reference matrix product: if rank == 0: # C0[:] = np.dot(A0, B0) gemm(1.0, B0, A0, 0.0, C0) #gemm(1.0, A0, A0, 0.0, Z0, transa='t') print A0.shape, Z0.shape Z0[:] = np.dot(A0.T, A0) # Y0[:] = np.dot(A0, X0) gemv(1.0, A0, X0.ravel(), 0.0, Y0.ravel()) r2k(1.0, A0, D0, 0.0, S0) rk(1.0, A0, 0.0, U0) assert globA.check(A0) and globB.check(B0) and globC.check(C0) assert globX.check(X0) and globY.check(Y0) assert globD.check(D0) and globS.check(S0) and globU.check(U0) # Create distributed destriptors with various block sizes: distA = grid.new_descriptor(M, K, 2, 2) distB = grid.new_descriptor(K, N, 2, 4) distC = grid.new_descriptor(M, N, 3, 2) distZ = grid.new_descriptor(K, K, 5, 7) distX = grid.new_descriptor(K, 1, 4, 1) distY = grid.new_descriptor(M, 1, 3, 1) distD = grid.new_descriptor(M, K, 2, 3) distS = grid.new_descriptor(M, M, 2, 2) distU = grid.new_descriptor(M, M, 2, 2) # Distributed matrices: A = distA.empty(dtype=dtype) B = distB.empty(dtype=dtype) C = distC.empty(dtype=dtype) Z = distZ.empty(dtype=dtype) X = distX.empty(dtype=dtype) Y = distY.empty(dtype=dtype) D = distD.empty(dtype=dtype) S = distS.zeros(dtype=dtype) # zeros needed for rank-updates U = distU.zeros(dtype=dtype) # zeros needed for rank-updates Redistributor(world, globA, distA).redistribute(A0, A) Redistributor(world, globB, distB).redistribute(B0, B) Redistributor(world, globX, distX).redistribute(X0, X) Redistributor(world, globD, distD).redistribute(D0, D) pblas_simple_gemm(distA, distB, distC, A, B, C) pblas_simple_gemm(distA, distA, distZ, A, A, Z, transa='T') pblas_simple_gemv(distA, distX, distY, A, X, Y) pblas_simple_r2k(distA, distD, 
distS, A, D, S) pblas_simple_rk(distA, distU, A, U) # Collect result back on master C1 = globC.empty(dtype=dtype) Y1 = globY.empty(dtype=dtype) S1 = globS.zeros(dtype=dtype) # zeros needed for rank-updates U1 = globU.zeros(dtype=dtype) # zeros needed for rank-updates Redistributor(world, distC, globC).redistribute(C, C1) Redistributor(world, distY, globY).redistribute(Y, Y1) Redistributor(world, distS, globS).redistribute(S, S1) Redistributor(world, distU, globU).redistribute(U, U1) if rank == 0: gemm_err = abs(C1 - C0).max() gemv_err = abs(Y1 - Y0).max() r2k_err = abs(S1 - S0).max() rk_err = abs(U1 - U0).max() print('gemm err', gemm_err) print('gemv err', gemv_err) print('r2k err', r2k_err) print('rk_err', rk_err) else: gemm_err = 0.0 gemv_err = 0.0 r2k_err = 0.0 rk_err = 0.0 gemm_err = world.sum(gemm_err) # We don't like exceptions on only one cpu gemv_err = world.sum(gemv_err) r2k_err = world.sum(r2k_err) rk_err = world.sum(rk_err) equal(gemm_err, 0, tol) equal(gemv_err, 0, tol) equal(r2k_err, 0, tol) equal(rk_err, 0, tol) if __name__ in ['__main__', '__builtin__']: if not compiled_with_sl(): print('Not built with ScaLAPACK. Test does not apply.') else: main(dtype=float) main(dtype=complex)
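# A condensed sketch of the redistribute round-trip exercised above, assuming
# an MPI world large enough for a 2-by-2 BLACS grid. It only reuses the calls
# already imported in this test: a matrix product is formed with PBLAS on the
# distributed grid and gathered back onto the master rank.
def roundtrip_gemm_sketch(M=8, N=6, K=4, seed=0):
    gen = np.random.RandomState(seed)
    grid = BlacsGrid(world, 2, 2)
    globA = grid.new_descriptor(M, K, M, K)   # whole matrix held by master
    globB = grid.new_descriptor(K, N, K, N)
    globC = grid.new_descriptor(M, N, M, N)
    distA = grid.new_descriptor(M, K, 2, 2)   # 2x2 blocks over the grid
    distB = grid.new_descriptor(K, N, 2, 2)
    distC = grid.new_descriptor(M, N, 2, 2)
    A0 = gen.rand(*globA.shape)
    B0 = gen.rand(*globB.shape)
    A = distA.empty(dtype=float)
    B = distB.empty(dtype=float)
    C = distC.empty(dtype=float)
    Redistributor(world, globA, distA).redistribute(A0, A)
    Redistributor(world, globB, distB).redistribute(B0, B)
    pblas_simple_gemm(distA, distB, distC, A, B, C)
    C1 = globC.empty(dtype=float)
    Redistributor(world, distC, globC).redistribute(C, C1)
    return C1  # full M-by-N product on the master rank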
robwarm/gpaw-symm
gpaw/test/parallel/pblas.py
Python
gpl-3.0
5,362
[ "GPAW" ]
98e26530595bd46c2fb172a2d8b79b9f1abd2659e50c69d4c698cdff5c113304
#!/usr/bin/env python3 import itertools from collections import defaultdict import logging from operator import mul import networkx as nx import numpy as np import pandas as pd from pgmpy.base import DirectedGraph from pgmpy.factors.discrete import TabularCPD, JointProbabilityDistribution, DiscreteFactor from pgmpy.independencies import Independencies from pgmpy.extern import six from pgmpy.extern.six.moves import range, reduce from pgmpy.models.MarkovModel import MarkovModel class BayesianModel(DirectedGraph): """ Base class for bayesian model. A models stores nodes and edges with conditional probability distribution (cpd) and other attributes. models hold directed edges. Self loops are not allowed neither multiple (parallel) edges. Nodes can be any hashable python object. Edges are represented as links between nodes. Parameters ---------- data : input graph Data to initialize graph. If data=None (default) an empty graph is created. The data can be an edge list, or any NetworkX graph object. Examples -------- Create an empty bayesian model with no nodes and no edges. >>> from pgmpy.models import BayesianModel >>> G = BayesianModel() G can be grown in several ways. **Nodes:** Add one node at a time: >>> G.add_node('a') Add the nodes from any container (a list, set or tuple or the nodes from another graph). >>> G.add_nodes_from(['a', 'b']) **Edges:** G can also be grown by adding edges. Add one edge, >>> G.add_edge('a', 'b') a list of edges, >>> G.add_edges_from([('a', 'b'), ('b', 'c')]) If some edges connect nodes not yet in the model, the nodes are added automatically. There are no errors when adding nodes or edges that already exist. **Shortcuts:** Many common graph features allow python syntax for speed reporting. >>> 'a' in G # check if node in graph True >>> len(G) # number of nodes in graph 3 """ def __init__(self, ebunch=None): super(BayesianModel, self).__init__() if ebunch: self.add_edges_from(ebunch) self.cpds = [] self.cardinalities = defaultdict(int) def add_edge(self, u, v, **kwargs): """ Add an edge between u and v. The nodes u and v will be automatically added if they are not already in the graph Parameters ---------- u,v : nodes Nodes can be any hashable python object. Examples -------- >>> from pgmpy.models import BayesianModel/home/abinash/software_packages/numpy-1.7.1 >>> G = BayesianModel() >>> G.add_nodes_from(['grade', 'intel']) >>> G.add_edge('grade', 'intel') """ if u == v: raise ValueError('Self loops are not allowed.') if u in self.nodes() and v in self.nodes() and nx.has_path(self, v, u): raise ValueError( 'Loops are not allowed. Adding the edge from (%s->%s) forms a loop.' % (u, v)) else: super(BayesianModel, self).add_edge(u, v, **kwargs) def remove_node(self, node): """ Remove node from the model. Removing a node also removes all the associated edges, removes the CPD of the node and marginalizes the CPDs of it's children. Parameters ---------- node : node Node which is to be removed from the model. Returns ------- None Examples -------- >>> import pandas as pd >>> import numpy as np >>> from pgmpy.models import BayesianModel >>> model = BayesianModel([('A', 'B'), ('B', 'C'), ... ('A', 'D'), ('D', 'C')]) >>> values = pd.DataFrame(np.random.randint(low=0, high=2, size=(1000, 4)), ... 
columns=['A', 'B', 'C', 'D']) >>> model.fit(values) >>> model.get_cpds() [<TabularCPD representing P(A:2) at 0x7f28248e2438>, <TabularCPD representing P(B:2 | A:2) at 0x7f28248e23c8>, <TabularCPD representing P(C:2 | B:2, D:2) at 0x7f28248e2748>, <TabularCPD representing P(D:2 | A:2) at 0x7f28248e26a0>] >>> model.remove_node('A') >>> model.get_cpds() [<TabularCPD representing P(B:2) at 0x7f28248e23c8>, <TabularCPD representing P(C:2 | B:2, D:2) at 0x7f28248e2748>, <TabularCPD representing P(D:2) at 0x7f28248e26a0>] """ affected_nodes = [v for u, v in self.edges() if u == node] for affected_node in affected_nodes: node_cpd = self.get_cpds(node=affected_node) if node_cpd: node_cpd.marginalize([node], inplace=True) if self.get_cpds(node=node): self.remove_cpds(node) super(BayesianModel, self).remove_node(node) def remove_nodes_from(self, nodes): """ Remove multiple nodes from the model. Removing a node also removes all the associated edges, removes the CPD of the node and marginalizes the CPDs of it's children. Parameters ---------- nodes : list, set (iterable) Nodes which are to be removed from the model. Returns ------- None Examples -------- >>> import pandas as pd >>> import numpy as np >>> from pgmpy.models import BayesianModel >>> model = BayesianModel([('A', 'B'), ('B', 'C'), ... ('A', 'D'), ('D', 'C')]) >>> values = pd.DataFrame(np.random.randint(low=0, high=2, size=(1000, 4)), ... columns=['A', 'B', 'C', 'D']) >>> model.fit(values) >>> model.get_cpds() [<TabularCPD representing P(A:2) at 0x7f28248e2438>, <TabularCPD representing P(B:2 | A:2) at 0x7f28248e23c8>, <TabularCPD representing P(C:2 | B:2, D:2) at 0x7f28248e2748>, <TabularCPD representing P(D:2 | A:2) at 0x7f28248e26a0>] >>> model.remove_nodes_from(['A', 'B']) >>> model.get_cpds() [<TabularCPD representing P(C:2 | D:2) at 0x7f28248e2a58>, <TabularCPD representing P(D:2) at 0x7f28248e26d8>] """ for node in nodes: self.remove_node(node) def add_cpds(self, *cpds): """ Add CPD (Conditional Probability Distribution) to the Bayesian Model. Parameters ---------- cpds : list, set, tuple (array-like) List of CPDs which will be associated with the model EXAMPLE ------- >>> from pgmpy.models import BayesianModel >>> from pgmpy.factors.discrete.CPD import TabularCPD >>> student = BayesianModel([('diff', 'grades'), ('intel', 'grades')]) >>> grades_cpd = TabularCPD('grades', 3, [[0.1,0.1,0.1,0.1,0.1,0.1], ... [0.1,0.1,0.1,0.1,0.1,0.1], ... [0.8,0.8,0.8,0.8,0.8,0.8]], ... 
evidence=['diff', 'intel'], evidence_card=[2, 3]) >>> student.add_cpds(grades_cpd) +------+-----------------------+---------------------+ |diff: | easy | hard | +------+------+------+---------+------+------+-------+ |intel:| dumb | avg | smart | dumb | avg | smart | +------+------+------+---------+------+------+-------+ |gradeA| 0.1 | 0.1 | 0.1 | 0.1 | 0.1 | 0.1 | +------+------+------+---------+------+------+-------+ |gradeB| 0.1 | 0.1 | 0.1 | 0.1 | 0.1 | 0.1 | +------+------+------+---------+------+------+-------+ |gradeC| 0.8 | 0.8 | 0.8 | 0.8 | 0.8 | 0.8 | +------+------+------+---------+------+------+-------+ """ for cpd in cpds: if not isinstance(cpd, TabularCPD): raise ValueError('Only TabularCPD can be added.') if set(cpd.variables) - set(cpd.variables).intersection( set(self.nodes())): raise ValueError('CPD defined on variable not in the model', cpd) for prev_cpd_index in range(len(self.cpds)): if self.cpds[prev_cpd_index].variable == cpd.variable: logging.warning("Replacing existing CPD for {var}".format(var=cpd.variable)) self.cpds[prev_cpd_index] = cpd break else: self.cpds.append(cpd) def get_cpds(self, node=None): """ Returns the cpd of the node. If node is not specified returns all the CPDs that have been added till now to the graph Parameter --------- node: any hashable python object (optional) The node whose CPD we want. If node not specified returns all the CPDs added to the model. Returns ------- A list of TabularCPDs. Examples -------- >>> from pgmpy.models import BayesianModel >>> from pgmpy.factors.discrete import TabularCPD >>> student = BayesianModel([('diff', 'grade'), ('intel', 'grade')]) >>> cpd = TabularCPD('grade', 2, [[0.1, 0.9, 0.2, 0.7], ... [0.9, 0.1, 0.8, 0.3]], ... ['intel', 'diff'], [2, 2]) >>> student.add_cpds(cpd) >>> student.get_cpds() """ if node: if node not in self.nodes(): raise ValueError('Node not present in the Directed Graph') for cpd in self.cpds: if cpd.variable == node: return cpd raise ValueError("CPD not added for the node: {node}".format(node=node)) else: return self.cpds def remove_cpds(self, *cpds): """ Removes the cpds that are provided in the argument. Parameters ---------- *cpds: TabularCPD object A CPD object on any subset of the variables of the model which is to be associated with the model. Examples -------- >>> from pgmpy.models import BayesianModel >>> from pgmpy.factors.discrete import TabularCPD >>> student = BayesianModel([('diff', 'grade'), ('intel', 'grade')]) >>> cpd = TabularCPD('grade', 2, [[0.1, 0.9, 0.2, 0.7], ... [0.9, 0.1, 0.8, 0.3]], ... ['intel', 'diff'], [2, 2]) >>> student.add_cpds(cpd) >>> student.remove_cpds(cpd) """ for cpd in cpds: if isinstance(cpd, six.string_types): cpd = self.get_cpds(cpd) self.cpds.remove(cpd) def get_cardinality(self, node): """ Returns the cardinality of the node. Throws an error if the CPD for the queried node hasn't been added to the network. Parameters ---------- node: Any hashable python object. Returns ------- int: The cardinality of the node. """ return self.get_cpds(node).cardinality[0] def check_model(self): """ Check the model for various errors. This method checks for the following errors. * Checks if the sum of the probabilities for each state is equal to 1 (tol=0.01). * Checks if the CPDs associated with nodes are consistent with their parents. 
Returns ------- check: boolean True if all the checks are passed """ for node in self.nodes(): cpd = self.get_cpds(node=node) if isinstance(cpd, TabularCPD): evidence = cpd.variables[:0:-1] parents = self.get_parents(node) if set(evidence if evidence else []) != set(parents if parents else []): raise ValueError("CPD associated with %s doesn't have " "proper parents associated with it." % node) # TODO: need to fix the order in which BIF file probabilities are read # if not np.allclose(cpd.to_factor().marginalize([node], inplace=False).values.flatten('C'), # np.ones(np.product(cpd.cardinality[:0:-1])), # atol=0.01): # raise ValueError('Sum of probabilites of states for node %s' # ' is not equal to 1.' % node) return True def _get_ancestors_of(self, obs_nodes_list): """ Returns a dictionary of all ancestors of all the observed nodes including the node itself. Parameters ---------- obs_nodes_list: string, list-type name of all the observed nodes Examples -------- >>> from pgmpy.models import BayesianModel >>> model = BayesianModel([('D', 'G'), ('I', 'G'), ('G', 'L'), ... ('I', 'L')]) >>> model._get_ancestors_of('G') {'D', 'G', 'I'} >>> model._get_ancestors_of(['G', 'I']) {'D', 'G', 'I'} """ if not isinstance(obs_nodes_list, (list, tuple)): obs_nodes_list = [obs_nodes_list] for node in obs_nodes_list: if node not in self.nodes(): raise ValueError('Node {s} not in not in graph'.format(s=node)) ancestors_list = set() nodes_list = set(obs_nodes_list) while nodes_list: node = nodes_list.pop() if node not in ancestors_list: nodes_list.update(self.predecessors(node)) ancestors_list.add(node) return ancestors_list def active_trail_nodes(self, variables, observed=None): """ Returns a dictionary with the given variables as keys and all the nodes reachable from that respective variable as values. Parameters ---------- variables: str or array like variables whose active trails are to be found. observed : List of nodes (optional) If given the active trails would be computed assuming these nodes to be observed. 
Examples -------- >>> from pgmpy.models import BayesianModel >>> student = BayesianModel() >>> student.add_nodes_from(['diff', 'intel', 'grades']) >>> student.add_edges_from([('diff', 'grades'), ('intel', 'grades')]) >>> student.active_trail_nodes('diff') {'diff': {'diff', 'grades'}} >>> student.active_trail_nodes(['diff', 'intel'], observed='grades') {'diff': {'diff', 'intel'}, 'intel': {'diff', 'intel'}} References ---------- Details of the algorithm can be found in 'Probabilistic Graphical Model Principles and Techniques' - Koller and Friedman Page 75 Algorithm 3.1 """ if observed: observed_list = observed if isinstance(observed, (list, tuple)) else [observed] else: observed_list = [] ancestors_list = self._get_ancestors_of(observed_list) # Direction of flow of information # up -> from parent to child # down -> from child to parent active_trails = {} for start in variables if isinstance(variables, (list, tuple)) else [variables]: visit_list = set() visit_list.add((start, 'up')) traversed_list = set() active_nodes = set() while visit_list: node, direction = visit_list.pop() if (node, direction) not in traversed_list: if node not in observed_list: active_nodes.add(node) traversed_list.add((node, direction)) if direction == 'up' and node not in observed_list: for parent in self.predecessors(node): visit_list.add((parent, 'up')) for child in self.successors(node): visit_list.add((child, 'down')) elif direction == 'down': if node not in observed_list: for child in self.successors(node): visit_list.add((child, 'down')) if node in ancestors_list: for parent in self.predecessors(node): visit_list.add((parent, 'up')) active_trails[start] = active_nodes return active_trails def local_independencies(self, variables): """ Returns an instance of Independencies containing the local independencies of each of the variables. Parameters ---------- variables: str or array like variables whose local independencies are to be found. Examples -------- >>> from pgmpy.models import BayesianModel >>> student = BayesianModel() >>> student.add_edges_from([('diff', 'grade'), ('intel', 'grade'), >>> ('grade', 'letter'), ('intel', 'SAT')]) >>> ind = student.local_independencies('grade') >>> ind (grade _|_ SAT | diff, intel) """ def dfs(node): """ Returns the descendents of node. Since Bayesian Networks are acyclic, this is a very simple dfs which does not remember which nodes it has visited. """ descendents = [] visit = [node] while visit: n = visit.pop() neighbors = self.neighbors(n) visit.extend(neighbors) descendents.extend(neighbors) return descendents independencies = Independencies() for variable in variables if isinstance(variables, (list, tuple)) else [variables]: non_descendents = set(self.nodes()) - {variable} - set(dfs(variable)) parents = set(self.get_parents(variable)) if non_descendents - parents: independencies.add_assertions([variable, non_descendents - parents, parents]) return independencies def is_active_trail(self, start, end, observed=None): """ Returns True if there is any active trail between start and end node Parameters ---------- start : Graph Node end : Graph Node observed : List of nodes (optional) If given the active trail would be computed assuming these nodes to be observed. additional_observed : List of nodes (optional) If given the active trail would be computed assuming these nodes to be observed along with the nodes marked as observed in the model. 
Examples -------- >>> from pgmpy.models import BayesianModel >>> student = BayesianModel() >>> student.add_nodes_from(['diff', 'intel', 'grades', 'letter', 'sat']) >>> student.add_edges_from([('diff', 'grades'), ('intel', 'grades'), ('grades', 'letter'), ... ('intel', 'sat')]) >>> student.is_active_trail('diff', 'intel') False >>> student.is_active_trail('grades', 'sat') True """ if end in self.active_trail_nodes(start, observed)[start]: return True else: return False def get_independencies(self, latex=False): """ Computes independencies in the Bayesian Network, by checking d-seperation. Parameters ---------- latex: boolean If latex=True then latex string of the independence assertion would be created. Examples -------- >>> from pgmpy.models import BayesianModel >>> chain = BayesianModel([('X', 'Y'), ('Y', 'Z')]) >>> chain.get_independencies() (X _|_ Z | Y) (Z _|_ X | Y) """ independencies = Independencies() for start in (self.nodes()): rest = set(self.nodes()) - {start} for r in range(len(rest)): for observed in itertools.combinations(rest, r): d_seperated_variables = rest - set(observed) - set( self.active_trail_nodes(start, observed=observed)[start]) if d_seperated_variables: independencies.add_assertions([start, d_seperated_variables, observed]) independencies.reduce() if not latex: return independencies else: return independencies.latex_string() def to_markov_model(self): """ Converts bayesian model to markov model. The markov model created would be the moral graph of the bayesian model. Examples -------- >>> from pgmpy.models import BayesianModel >>> G = BayesianModel([('diff', 'grade'), ('intel', 'grade'), ... ('intel', 'SAT'), ('grade', 'letter')]) >>> mm = G.to_markov_model() >>> mm.nodes() ['diff', 'grade', 'intel', 'SAT', 'letter'] >>> mm.edges() [('diff', 'intel'), ('diff', 'grade'), ('intel', 'grade'), ('intel', 'SAT'), ('grade', 'letter')] """ moral_graph = self.moralize() mm = MarkovModel(moral_graph.edges()) mm.add_factors(*[cpd.to_factor() for cpd in self.cpds]) return mm def to_junction_tree(self): """ Creates a junction tree (or clique tree) for a given bayesian model. For converting a Bayesian Model into a Clique tree, first it is converted into a Markov one. For a given markov model (H) a junction tree (G) is a graph 1. where each node in G corresponds to a maximal clique in H 2. each sepset in G separates the variables strictly on one side of the edge to other. Examples -------- >>> from pgmpy.models import BayesianModel >>> from pgmpy.factors.discrete import TabularCPD >>> G = BayesianModel([('diff', 'grade'), ('intel', 'grade'), ... ('intel', 'SAT'), ('grade', 'letter')]) >>> diff_cpd = TabularCPD('diff', 2, [[0.2], [0.8]]) >>> intel_cpd = TabularCPD('intel', 3, [[0.5], [0.3], [0.2]]) >>> grade_cpd = TabularCPD('grade', 3, ... [[0.1,0.1,0.1,0.1,0.1,0.1], ... [0.1,0.1,0.1,0.1,0.1,0.1], ... [0.8,0.8,0.8,0.8,0.8,0.8]], ... evidence=['diff', 'intel'], ... evidence_card=[2, 3]) >>> sat_cpd = TabularCPD('SAT', 2, ... [[0.1, 0.2, 0.7], ... [0.9, 0.8, 0.3]], ... evidence=['intel'], evidence_card=[3]) >>> letter_cpd = TabularCPD('letter', 2, ... [[0.1, 0.4, 0.8], ... [0.9, 0.6, 0.2]], ... evidence=['grade'], evidence_card=[3]) >>> G.add_cpds(diff_cpd, intel_cpd, grade_cpd, sat_cpd, letter_cpd) >>> jt = G.to_junction_tree() """ mm = self.to_markov_model() return mm.to_junction_tree() def fit(self, data, estimator=None, state_names=[], complete_samples_only=True, **kwargs): """ Estimates the CPD for each variable based on a given data set. 
Parameters ---------- data: pandas DataFrame object DataFrame object with column names identical to the variable names of the network. (If some values in the data are missing the data cells should be set to `numpy.NaN`. Note that pandas converts each column containing `numpy.NaN`s to dtype `float`.) estimator: Estimator class One of: - MaximumLikelihoodEstimator (default) - BayesianEstimator: In this case, pass 'prior_type' and either 'pseudo_counts' or 'equivalent_sample_size' as additional keyword arguments. See `BayesianEstimator.get_parameters()` for usage. state_names: dict (optional) A dict indicating, for each variable, the discrete set of states that the variable can take. If unspecified, the observed values in the data set are taken to be the only possible states. complete_samples_only: bool (default `True`) Specifies how to deal with missing data, if present. If set to `True` all rows that contain `np.Nan` somewhere are ignored. If `False` then, for each variable, every row where neither the variable nor its parents are `np.NaN` is used. Examples -------- >>> import pandas as pd >>> from pgmpy.models import BayesianModel >>> from pgmpy.estimators import MaximumLikelihoodEstimator >>> data = pd.DataFrame(data={'A': [0, 0, 1], 'B': [0, 1, 0], 'C': [1, 1, 0]}) >>> model = BayesianModel([('A', 'C'), ('B', 'C')]) >>> model.fit(data) >>> model.get_cpds() [<TabularCPD representing P(A:2) at 0x7fb98a7d50f0>, <TabularCPD representing P(B:2) at 0x7fb98a7d5588>, <TabularCPD representing P(C:2 | A:2, B:2) at 0x7fb98a7b1f98>] """ from pgmpy.estimators import MaximumLikelihoodEstimator, BayesianEstimator, BaseEstimator if estimator is None: estimator = MaximumLikelihoodEstimator else: if not issubclass(estimator, BaseEstimator): raise TypeError("Estimator object should be a valid pgmpy estimator.") _estimator = estimator(self, data, state_names=state_names, complete_samples_only=complete_samples_only) cpds_list = _estimator.get_parameters(**kwargs) self.add_cpds(*cpds_list) def predict(self, data): """ Predicts states of all the missing variables. Parameters ---------- data : pandas DataFrame object A DataFrame object with column names same as the variables in the model. Examples -------- >>> import numpy as np >>> import pandas as pd >>> from pgmpy.models import BayesianModel >>> values = pd.DataFrame(np.random.randint(low=0, high=2, size=(1000, 5)), ... columns=['A', 'B', 'C', 'D', 'E']) >>> train_data = values[:800] >>> predict_data = values[800:] >>> model = BayesianModel([('A', 'B'), ('C', 'B'), ('C', 'D'), ('B', 'E')]) >>> model.fit(values) >>> predict_data = predict_data.copy() >>> predict_data.drop('E', axis=1, inplace=True) >>> y_pred = model.predict(predict_data) >>> y_pred E 800 0 801 1 802 1 803 1 804 0 ... ... 993 0 994 0 995 1 996 1 997 0 998 0 999 0 """ from pgmpy.inference import VariableElimination if set(data.columns) == set(self.nodes()): raise ValueError("No variable missing in data. Nothing to predict") elif set(data.columns) - set(self.nodes()): raise ValueError("Data has variables which are not in the model") missing_variables = set(self.nodes()) - set(data.columns) pred_values = defaultdict(list) # Send state_names dict from one of the estimated CPDs to the inference class. 
model_inference = VariableElimination(self, state_names=self.get_cpds()[0].state_names) for index, data_point in data.iterrows(): states_dict = model_inference.map_query(variables=missing_variables, evidence=data_point.to_dict()) for k, v in states_dict.items(): pred_values[k].append(v) return pd.DataFrame(pred_values, index=data.index) def predict_probability(self, data): """ Predicts probabilities of all states of the missing variables. Parameters ---------- data : pandas DataFrame object A DataFrame object with column names same as the variables in the model. Examples -------- >>> import numpy as np >>> import pandas as pd >>> from pgmpy.models import BayesianModel >>> values = pd.DataFrame(np.random.randint(low=0, high=2, size=(100, 5)), ... columns=['A', 'B', 'C', 'D', 'E']) >>> train_data = values[:80] >>> predict_data = values[80:] >>> model = BayesianModel([('A', 'B'), ('C', 'B'), ('C', 'D'), ('B', 'E')]) >>> model.fit(values) >>> predict_data = predict_data.copy() >>> predict_data.drop('B', axis=1, inplace=True) >>> y_prob = model.predict_probability(predict_data) >>> y_prob B_0 B_1 80 0.439178 0.560822 81 0.581970 0.418030 82 0.488275 0.511725 83 0.581970 0.418030 84 0.510794 0.489206 85 0.439178 0.560822 86 0.439178 0.560822 87 0.417124 0.582876 88 0.407978 0.592022 89 0.429905 0.570095 90 0.581970 0.418030 91 0.407978 0.592022 92 0.429905 0.570095 93 0.429905 0.570095 94 0.439178 0.560822 95 0.407978 0.592022 96 0.559904 0.440096 97 0.417124 0.582876 98 0.488275 0.511725 99 0.407978 0.592022 """ from pgmpy.inference import VariableElimination if set(data.columns) == set(self.nodes()): raise ValueError("No variable missing in data. Nothing to predict") elif set(data.columns) - set(self.nodes()): raise ValueError("Data has variables which are not in the model") missing_variables = set(self.nodes()) - set(data.columns) pred_values = defaultdict(list) model_inference = VariableElimination(self) for index, data_point in data.iterrows(): states_dict = model_inference.query(variables=missing_variables, evidence=data_point.to_dict()) for k, v in states_dict.items(): for l in range(len(v.values)): state = self.get_cpds(k).state_names[k][l] pred_values[k + '_' + str(state)].append(v.values[l]) return pd.DataFrame(pred_values, index=data.index) def get_factorized_product(self, latex=False): # TODO: refer to IMap class for explanation why this is not implemented. pass def get_immoralities(self): """ Finds all the immoralities in the model A v-structure X -> Z <- Y is an immorality if there is no direct edge between X and Y . Returns ------- set: A set of all the immoralities in the model Examples --------- >>> from pgmpy.models import BayesianModel >>> student = BayesianModel() >>> student.add_edges_from([('diff', 'grade'), ('intel', 'grade'), ... ('intel', 'SAT'), ('grade', 'letter')]) >>> student.get_immoralities() {('diff','intel')} """ immoralities = set() for node in self.nodes(): for parents in itertools.combinations(self.predecessors(node), 2): if not self.has_edge(parents[0], parents[1]) and not self.has_edge(parents[1], parents[0]): immoralities.add(tuple(sorted(parents))) return immoralities def is_iequivalent(self, model): """ Checks whether the given model is I-equivalent Two graphs G1 and G2 are said to be I-equivalent if they have same skeleton and have same set of immoralities. 
Note: For same skeleton different names of nodes can work but for immoralities names of nodes must be same Parameters ---------- model : A Bayesian model object, for which you want to check I-equivalence Returns -------- boolean : True if both are I-equivalent, False otherwise Examples -------- >>> from pgmpy.models import BayesianModel >>> G = BayesianModel() >>> G.add_edges_from([('V', 'W'), ('W', 'X'), ... ('X', 'Y'), ('Z', 'Y')]) >>> G1 = BayesianModel() >>> G1.add_edges_from([('W', 'V'), ('X', 'W'), ... ('X', 'Y'), ('Z', 'Y')]) >>> G.is_iequivalent(G1) True """ if not isinstance(model, BayesianModel): raise TypeError('model must be an instance of Bayesian Model') skeleton = nx.algorithms.isomorphism.GraphMatcher(self.to_undirected(), model.to_undirected()) if skeleton.is_isomorphic() and self.get_immoralities() == model.get_immoralities(): return True return False def is_imap(self, JPD): """ Checks whether the bayesian model is Imap of given JointProbabilityDistribution Parameters ----------- JPD : An instance of JointProbabilityDistribution Class, for which you want to check the Imap Returns -------- boolean : True if bayesian model is Imap for given Joint Probability Distribution False otherwise Examples -------- >>> from pgmpy.models import BayesianModel >>> from pgmpy.factors.discrete import TabularCPD >>> from pgmpy.factors.discrete import JointProbabilityDistribution >>> G = BayesianModel([('diff', 'grade'), ('intel', 'grade')]) >>> diff_cpd = TabularCPD('diff', 2, [[0.2], [0.8]]) >>> intel_cpd = TabularCPD('intel', 3, [[0.5], [0.3], [0.2]]) >>> grade_cpd = TabularCPD('grade', 3, ... [[0.1,0.1,0.1,0.1,0.1,0.1], ... [0.1,0.1,0.1,0.1,0.1,0.1], ... [0.8,0.8,0.8,0.8,0.8,0.8]], ... evidence=['diff', 'intel'], ... evidence_card=[2, 3]) >>> G.add_cpds(diff_cpd, intel_cpd, grade_cpd) >>> val = [0.01, 0.01, 0.08, 0.006, 0.006, 0.048, 0.004, 0.004, 0.032, 0.04, 0.04, 0.32, 0.024, 0.024, 0.192, 0.016, 0.016, 0.128] >>> JPD = JointProbabilityDistribution(['diff', 'intel', 'grade'], [2, 3, 3], val) >>> G.is_imap(JPD) True """ if not isinstance(JPD, JointProbabilityDistribution): raise TypeError("JPD must be an instance of JointProbabilityDistribution") factors = [cpd.to_factor() for cpd in self.get_cpds()] factor_prod = reduce(mul, factors) JPD_fact = DiscreteFactor(JPD.variables, JPD.cardinality, JPD.values) if JPD_fact == factor_prod: return True else: return False def copy(self): """ Returns a copy of the model. Returns ------- BayesianModel: Copy of the model on which the method was called. Examples -------- >>> from pgmpy.models import BayesianModel >>> from pgmpy.factors.discrete import TabularCPD >>> model = BayesianModel([('A', 'B'), ('B', 'C')]) >>> cpd_a = TabularCPD('A', 2, [[0.2], [0.8]]) >>> cpd_b = TabularCPD('B', 2, [[0.3, 0.7], [0.7, 0.3]], evidence=['A'], evidence_card=[2]) >>> cpd_c = TabularCPD('C', 2, [[0.1, 0.9], [0.9, 0.1]], evidence=['B'], evidence_card=[2]) >>> model.add_cpds(cpd_a, cpd_b, cpd_c) >>> copy_model = model.copy() >>> copy_model.nodes() ['C', 'A', 'B'] >>> copy_model.edges() [('A', 'B'), ('B', 'C')] >>> copy_model.get_cpds() [<TabularCPD representing P(A:2) at 0x7f2824930a58>, <TabularCPD representing P(B:2 | A:2) at 0x7f2824930a90>, <TabularCPD representing P(C:2 | B:2) at 0x7f2824944240>] """ model_copy = BayesianModel() model_copy.add_nodes_from(self.nodes()) model_copy.add_edges_from(self.edges()) if self.cpds: model_copy.add_cpds(*[cpd.copy() for cpd in self.cpds]) return model_copy
jhonatanoliveira/pgmpy
pgmpy/models/BayesianModel.py
Python
mit
36,892
[ "VisIt" ]
cf4d34ecfc3beb8ababe6c23234f0aad5944e78b1db401e9a584c21d85090137
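The BayesianModel file above documents two ways of using the class: specify TabularCPDs by hand and validate them with check_model(), or learn them from a DataFrame with fit() and query missing columns with predict(). The following is a minimal end-to-end sketch, not part of the repository; the graph structure, probability values and column names are illustrative, and it assumes the API behaves exactly as the docstrings above describe.

import numpy as np
import pandas as pd
from pgmpy.models import BayesianModel
from pgmpy.factors.discrete import TabularCPD

# Structure: diff -> grade <- intel (illustrative example, not from the source file)
model = BayesianModel([('diff', 'grade'), ('intel', 'grade')])

# Hand-specified CPDs; each column of grade_cpd sums to 1
diff_cpd = TabularCPD('diff', 2, [[0.6], [0.4]])
intel_cpd = TabularCPD('intel', 2, [[0.7], [0.3]])
grade_cpd = TabularCPD('grade', 3,
                       [[0.3, 0.05, 0.9, 0.5],
                        [0.4, 0.25, 0.08, 0.3],
                        [0.3, 0.7, 0.02, 0.2]],
                       evidence=['intel', 'diff'], evidence_card=[2, 2])
model.add_cpds(diff_cpd, intel_cpd, grade_cpd)
assert model.check_model()          # parents of each CPD must match the graph

# Alternatively, learn the CPDs from data (MaximumLikelihoodEstimator by default)
data = pd.DataFrame(np.random.randint(0, 2, size=(500, 2)), columns=['diff', 'intel'])
data['grade'] = np.random.randint(0, 3, size=500)
learned = BayesianModel([('diff', 'grade'), ('intel', 'grade')])
learned.fit(data)

# predict() runs a MAP query (via VariableElimination) for every missing column
query = pd.DataFrame({'diff': [0, 1], 'intel': [1, 0]})
print(learned.predict(query))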
# -*- coding: utf-8 -*- """ ===================================================== OT for image color adaptation with mapping estimation ===================================================== OT for domain adaptation with image color adaptation [6] with mapping estimation [8]. [6] Ferradans, S., Papadakis, N., Peyre, G., & Aujol, J. F. (2014). Regularized discrete optimal transport. SIAM Journal on Imaging Sciences, 7(3), 1853-1882. [8] M. Perrot, N. Courty, R. Flamary, A. Habrard, "Mapping estimation for discrete optimal transport", Neural Information Processing Systems (NIPS), 2016. """ # Authors: Remi Flamary <remi.flamary@unice.fr> # Stanislas Chambon <stan.chambon@gmail.com> # # License: MIT License import numpy as np from scipy import ndimage import matplotlib.pylab as pl import ot r = np.random.RandomState(42) def im2mat(I): """Converts and image to matrix (one pixel per line)""" return I.reshape((I.shape[0] * I.shape[1], I.shape[2])) def mat2im(X, shape): """Converts back a matrix to an image""" return X.reshape(shape) def minmax(I): return np.clip(I, 0, 1) ############################################################################## # Generate data # ------------- # Loading images I1 = ndimage.imread('../data/ocean_day.jpg').astype(np.float64) / 256 I2 = ndimage.imread('../data/ocean_sunset.jpg').astype(np.float64) / 256 X1 = im2mat(I1) X2 = im2mat(I2) # training samples nb = 1000 idx1 = r.randint(X1.shape[0], size=(nb,)) idx2 = r.randint(X2.shape[0], size=(nb,)) Xs = X1[idx1, :] Xt = X2[idx2, :] ############################################################################## # Domain adaptation for pixel distribution transfer # ------------------------------------------------- # EMDTransport ot_emd = ot.da.EMDTransport() ot_emd.fit(Xs=Xs, Xt=Xt) transp_Xs_emd = ot_emd.transform(Xs=X1) Image_emd = minmax(mat2im(transp_Xs_emd, I1.shape)) # SinkhornTransport ot_sinkhorn = ot.da.SinkhornTransport(reg_e=1e-1) ot_sinkhorn.fit(Xs=Xs, Xt=Xt) transp_Xs_sinkhorn = ot_sinkhorn.transform(Xs=X1) Image_sinkhorn = minmax(mat2im(transp_Xs_sinkhorn, I1.shape)) ot_mapping_linear = ot.da.MappingTransport( mu=1e0, eta=1e-8, bias=True, max_iter=20, verbose=True) ot_mapping_linear.fit(Xs=Xs, Xt=Xt) X1tl = ot_mapping_linear.transform(Xs=X1) Image_mapping_linear = minmax(mat2im(X1tl, I1.shape)) ot_mapping_gaussian = ot.da.MappingTransport( mu=1e0, eta=1e-2, sigma=1, bias=False, max_iter=10, verbose=True) ot_mapping_gaussian.fit(Xs=Xs, Xt=Xt) X1tn = ot_mapping_gaussian.transform(Xs=X1) # use the estimated mapping Image_mapping_gaussian = minmax(mat2im(X1tn, I1.shape)) ############################################################################## # Plot original images # -------------------- pl.figure(1, figsize=(6.4, 3)) pl.subplot(1, 2, 1) pl.imshow(I1) pl.axis('off') pl.title('Image 1') pl.subplot(1, 2, 2) pl.imshow(I2) pl.axis('off') pl.title('Image 2') pl.tight_layout() ############################################################################## # Plot pixel values distribution # ------------------------------ pl.figure(2, figsize=(6.4, 5)) pl.subplot(1, 2, 1) pl.scatter(Xs[:, 0], Xs[:, 2], c=Xs) pl.axis([0, 1, 0, 1]) pl.xlabel('Red') pl.ylabel('Blue') pl.title('Image 1') pl.subplot(1, 2, 2) pl.scatter(Xt[:, 0], Xt[:, 2], c=Xt) pl.axis([0, 1, 0, 1]) pl.xlabel('Red') pl.ylabel('Blue') pl.title('Image 2') pl.tight_layout() ############################################################################## # Plot transformed images # ----------------------- pl.figure(2, figsize=(10, 5)) pl.subplot(2, 3, 1) 
pl.imshow(I1) pl.axis('off') pl.title('Im. 1') pl.subplot(2, 3, 4) pl.imshow(I2) pl.axis('off') pl.title('Im. 2') pl.subplot(2, 3, 2) pl.imshow(Image_emd) pl.axis('off') pl.title('EmdTransport') pl.subplot(2, 3, 5) pl.imshow(Image_sinkhorn) pl.axis('off') pl.title('SinkhornTransport') pl.subplot(2, 3, 3) pl.imshow(Image_mapping_linear) pl.axis('off') pl.title('MappingTransport (linear)') pl.subplot(2, 3, 6) pl.imshow(Image_mapping_gaussian) pl.axis('off') pl.title('MappingTransport (gaussian)') pl.tight_layout() pl.show()
rflamary/POT
examples/plot_otda_mapping_colors_images.py
Python
mit
4,180
[ "Gaussian" ]
6f9670ed103574a4c67d7fff9214009d4fc1316be382cf0e2e3773fdca6ae677
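The example above applies the estimated mappings to image pixels, but the fit/transform pattern is the same for any point cloud. A short sketch on synthetic 3-D samples follows; the data and the parameter values (mu, eta, max_iter) are illustrative assumptions, not taken from the example.

import numpy as np
import ot

rng = np.random.RandomState(0)
Xs = rng.randn(200, 3) * 0.2 + 0.3      # source samples
Xt = rng.randn(200, 3) * 0.1 + 0.6      # target samples with a shifted mean

# Jointly estimate a transport plan and a linear mapping [8]
mapping = ot.da.MappingTransport(mu=1e0, eta=1e-8, bias=True, max_iter=20)
mapping.fit(Xs=Xs, Xt=Xt)

# The learned mapping generalizes to out-of-sample source points,
# just as the script above maps every pixel of I1 after fitting on a subsample
Xs_new = rng.randn(50, 3) * 0.2 + 0.3
Xs_mapped = mapping.transform(Xs=Xs_new)
print(Xs_mapped.mean(axis=0), Xt.mean(axis=0))   # means should roughly agree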
# -*- coding: utf-8 -*- # # plot_weight_matrices.py # # This file is part of NEST. # # Copyright (C) 2004 The NEST Initiative # # NEST is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # NEST is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with NEST. If not, see <http://www.gnu.org/licenses/>. ''' Plot weight matrices example ---------------------------- This example demonstrates how to extract the connection strength for all the synapses among two populations of neurons and gather these values in weight matrices for further analysis and visualization. All connection types between these populations are considered, i.e., four weight matrices are created and plotted. ''' ''' First, we import all necessary modules to extract, handle and plot the connectivity matrices ''' import numpy as np import pylab import nest import matplotlib.gridspec as gridspec from mpl_toolkits.axes_grid1 import make_axes_locatable ''' We now specify a function which takes as arguments lists of neuron gids corresponding to each population ''' def plot_weight_matrices(E_neurons, I_neurons): ''' Function to extract and plot weight matrices for all connections among E_neurons and I_neurons ''' ''' First, we initialize all the matrices, whose dimensionality is determined by the number of elements in each population Since in this example, we have 2 populations (E/I), 2^2 possible synaptic connections exist (EE, EI, IE, II) ''' W_EE = np.zeros([len(E_neurons), len(E_neurons)]) W_EI = np.zeros([len(I_neurons), len(E_neurons)]) W_IE = np.zeros([len(E_neurons), len(I_neurons)]) W_II = np.zeros([len(I_neurons), len(I_neurons)]) ''' Using `GetConnections`, we extract the information about all the connections involving the populations of interest. `GetConnections` returns a list of arrays (connection objects), one per connection. Each array has the following elements: [source-gid target-gid target-thread synapse-model-id port] ''' a_EE = nest.GetConnections(E_neurons, E_neurons) ''' Using `GetStatus`, we can extract the value of the connection weight, for all the connections between these populations ''' c_EE = nest.GetStatus(a_EE, keys='weight') ''' Repeat the two previous steps for all other connection types ''' a_EI = nest.GetConnections(I_neurons, E_neurons) c_EI = nest.GetStatus(a_EI, keys='weight') a_IE = nest.GetConnections(E_neurons, I_neurons) c_IE = nest.GetStatus(a_IE, keys='weight') a_II = nest.GetConnections(I_neurons, I_neurons) c_II = nest.GetStatus(a_II, keys='weight') ''' We now iterate through the list of all connections of each type. To populate the corresponding weight matrix, we begin by identifying the source-gid (first element of each connection object, n[0]) and the target-gid (second element of each connection object, n[1]). For each gid, we subtract the minimum gid within the corresponding population, to assure the matrix indices range from 0 to the size of the population. After determining the matrix indices [i, j], for each connection object, the corresponding weight is added to the entry W[i,j]. 
The procedure is then repeated for all the different connection types ''' for idx,n in enumerate(a_EE): W_EE[n[0]-min(E_neurons), n[1]-min(E_neurons)] += c_EE[idx] for idx,n in enumerate(a_EI): W_EI[n[0]-min(I_neurons), n[1]-min(E_neurons)] += c_EI[idx] for idx,n in enumerate(a_IE): W_IE[n[0]-min(E_neurons), n[1]-min(I_neurons)] += c_IE[idx] for idx,n in enumerate(a_II): W_II[n[0]-min(I_neurons), n[1]-min(I_neurons)] += c_II[idx] ''' We can now specify the figure and axes properties. For this specifc example, we wish to display all the weight matrices in a single figure, which requires us to use ``GridSpec`` (for example) to specify the spatial arrangement of the axes. A subplot is subsequently created for each connection type. ''' fig = pylab.figure() fig.suptitle('Weight matrices', fontsize=14) gs = gridspec.GridSpec(4,4) ax1 = pylab.subplot(gs[:-1,:-1]) ax2 = pylab.subplot(gs[:-1,-1]) ax3 = pylab.subplot(gs[-1,:-1]) ax4 = pylab.subplot(gs[-1,-1]) ''' Using ``imshow``, we can visualize the weight matrix in the corresponding axis. We can also specify the colormap for this image. ''' plt1 = ax1.imshow(W_EE, cmap='jet') ''' Using the ``axis_divider`` module from ``mpl_toolkits``, we can allocate a small extra space on the right of the current axis, which we reserve for a colorbar ''' divider = make_axes_locatable(ax1) cax = divider.append_axes("right", "5%", pad="3%") pylab.colorbar(plt1, cax=cax) ''' We now set the title of each axis and adjust the axis subplot parameters ''' ax1.set_title('W_{EE}') pylab.tight_layout() ''' Finally, the last three steps are repeated for each synapse type ''' plt2 = ax2.imshow(W_IE) plt2.set_cmap('jet') divider = make_axes_locatable(ax2) cax = divider.append_axes("right", "5%", pad="3%") pylab.colorbar(plt2, cax=cax) ax2.set_title('W_{EI}') pylab.tight_layout() plt3 = ax3.imshow(W_EI) plt3.set_cmap('jet') divider = make_axes_locatable(ax3) cax = divider.append_axes("right", "5%", pad="3%") pylab.colorbar(plt3, cax=cax) ax3.set_title('W_{IE}') pylab.tight_layout() plt4 = ax4.imshow(W_II) plt4.set_cmap('jet') divider = make_axes_locatable(ax4) cax = divider.append_axes("right", "5%", pad="3%") pylab.colorbar(plt4, cax=cax) ax4.set_title('W_{II}') pylab.tight_layout()
zifeo/nest-simulator
pynest/examples/plot_weight_matrices.py
Python
gpl-2.0
6,305
[ "NEURON" ]
e213d598300b59c58a801f603f40cb756c35ccc78f7bf76552ea35e7d04f402f
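The plot_weight_matrices() function above only needs the gid lists of the two populations. A hedged usage sketch, meant to run in the same session after the definition above, follows; the nest.Create and nest.Connect calls, the 'iaf_psc_alpha' model and the 'pairwise_bernoulli' rule are assumptions about a NEST 2.x kernel and do not appear in the example itself, and the weights are illustrative.

import nest
import pylab

nest.ResetKernel()

E_neurons = nest.Create('iaf_psc_alpha', 80)    # excitatory population (gids)
I_neurons = nest.Create('iaf_psc_alpha', 20)    # inhibitory population (gids)

# Sparse random connectivity: positive weights from E, negative weights from I
conn = {'rule': 'pairwise_bernoulli', 'p': 0.1}
nest.Connect(E_neurons, E_neurons, conn, {'weight': 20.0})
nest.Connect(E_neurons, I_neurons, conn, {'weight': 20.0})
nest.Connect(I_neurons, E_neurons, conn, {'weight': -80.0})
nest.Connect(I_neurons, I_neurons, conn, {'weight': -80.0})

plot_weight_matrices(E_neurons, I_neurons)
pylab.show()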
from zope.interface import Interface from foolscap.api import StringConstraint, ListOf, TupleOf, SetOf, DictOf, \ ChoiceOf, IntegerConstraint, Any, RemoteInterface, Referenceable HASH_SIZE=32 Hash = StringConstraint(maxLength=HASH_SIZE, minLength=HASH_SIZE)# binary format 32-byte SHA256 hash Nodeid = StringConstraint(maxLength=20, minLength=20) # binary format 20-byte SHA1 hash FURL = StringConstraint(1000) StorageIndex = StringConstraint(16) URI = StringConstraint(300) # kind of arbitrary MAX_BUCKETS = 256 # per peer -- zfec offers at most 256 shares per file DEFAULT_MAX_SEGMENT_SIZE = 128*1024 ShareData = StringConstraint(None) URIExtensionData = StringConstraint(1000) Number = IntegerConstraint(8) # 2**(8*8) == 16EiB ~= 18e18 ~= 18 exabytes Offset = Number ReadSize = int # the 'int' constraint is 2**31 == 2Gib -- large files are processed in not-so-large increments WriteEnablerSecret = Hash # used to protect mutable bucket modifications LeaseRenewSecret = Hash # used to protect bucket lease renewal requests LeaseCancelSecret = Hash # used to protect bucket lease cancellation requests class RIStubClient(RemoteInterface): """Each client publishes a service announcement for a dummy object called the StubClient. This object doesn't actually offer any services, but the announcement helps the Introducer keep track of which clients are subscribed (so the grid admin can keep track of things like the size of the grid and the client versions in use. This is the (empty) RemoteInterface for the StubClient.""" class RIBucketWriter(RemoteInterface): """ Objects of this kind live on the server side. """ def write(offset=Offset, data=ShareData): return None def close(): """ If the data that has been written is incomplete or inconsistent then the server will throw the data away, else it will store it for future retrieval. """ return None def abort(): """Abandon all the data that has been written. """ return None class RIBucketReader(RemoteInterface): def read(offset=Offset, length=ReadSize): return ShareData def advise_corrupt_share(reason=str): """Clients who discover hash failures in shares that they have downloaded from me will use this method to inform me about the failures. I will record their concern so that my operator can manually inspect the shares in question. I return None. This is a wrapper around RIStorageServer.advise_corrupt_share(), which is tied to a specific share, and therefore does not need the extra share-identifying arguments. Please see that method for full documentation. """ TestVector = ListOf(TupleOf(Offset, ReadSize, str, str)) # elements are (offset, length, operator, specimen) # operator is one of "lt, le, eq, ne, ge, gt" # nop always passes and is used to fetch data while writing. # you should use length==len(specimen) for everything except nop DataVector = ListOf(TupleOf(Offset, ShareData)) # (offset, data). This limits us to 30 writes of 1MiB each per call TestAndWriteVectorsForShares = DictOf(int, TupleOf(TestVector, DataVector, ChoiceOf(None, Offset), # new_length )) ReadVector = ListOf(TupleOf(Offset, ReadSize)) ReadData = ListOf(ShareData) # returns data[offset:offset+length] for each element of TestVector class RIStorageServer(RemoteInterface): __remote_name__ = "RIStorageServer.tahoe.allmydata.com" def get_version(): """ Return a dictionary of version information. 
""" return DictOf(str, Any()) def allocate_buckets(storage_index=StorageIndex, renew_secret=LeaseRenewSecret, cancel_secret=LeaseCancelSecret, sharenums=SetOf(int, maxLength=MAX_BUCKETS), allocated_size=Offset, canary=Referenceable): """ @param storage_index: the index of the bucket to be created or increfed. @param sharenums: these are the share numbers (probably between 0 and 99) that the sender is proposing to store on this server. @param renew_secret: This is the secret used to protect bucket refresh This secret is generated by the client and stored for later comparison by the server. Each server is given a different secret. @param cancel_secret: Like renew_secret, but protects bucket decref. @param canary: If the canary is lost before close(), the bucket is deleted. @return: tuple of (alreadygot, allocated), where alreadygot is what we already have and allocated is what we hereby agree to accept. New leases are added for shares in both lists. """ return TupleOf(SetOf(int, maxLength=MAX_BUCKETS), DictOf(int, RIBucketWriter, maxKeys=MAX_BUCKETS)) def add_lease(storage_index=StorageIndex, renew_secret=LeaseRenewSecret, cancel_secret=LeaseCancelSecret): """ Add a new lease on the given bucket. If the renew_secret matches an existing lease, that lease will be renewed instead. If there is no bucket for the given storage_index, return silently. (note that in tahoe-1.3.0 and earlier, IndexError was raised if there was no bucket) """ return Any() # returns None now, but future versions might change def renew_lease(storage_index=StorageIndex, renew_secret=LeaseRenewSecret): """ Renew the lease on a given bucket, resetting the timer to 31 days. Some networks will use this, some will not. If there is no bucket for the given storage_index, IndexError will be raised. For mutable shares, if the given renew_secret does not match an existing lease, IndexError will be raised with a note listing the server-nodeids on the existing leases, so leases on migrated shares can be renewed or cancelled. For immutable shares, IndexError (without the note) will be raised. """ return Any() def cancel_lease(storage_index=StorageIndex, cancel_secret=LeaseCancelSecret): """ Cancel the lease on a given bucket. If this was the last lease on the bucket, the bucket will be deleted. If there is no bucket for the given storage_index, IndexError will be raised. For mutable shares, if the given cancel_secret does not match an existing lease, IndexError will be raised with a note listing the server-nodeids on the existing leases, so leases on migrated shares can be renewed or cancelled. For immutable shares, IndexError (without the note) will be raised. """ return Any() def get_buckets(storage_index=StorageIndex): return DictOf(int, RIBucketReader, maxKeys=MAX_BUCKETS) def slot_readv(storage_index=StorageIndex, shares=ListOf(int), readv=ReadVector): """Read a vector from the numbered shares associated with the given storage index. An empty shares list means to return data from all known shares. Returns a dictionary with one key per share.""" return DictOf(int, ReadData) # shnum -> results def slot_testv_and_readv_and_writev(storage_index=StorageIndex, secrets=TupleOf(WriteEnablerSecret, LeaseRenewSecret, LeaseCancelSecret), tw_vectors=TestAndWriteVectorsForShares, r_vector=ReadVector, ): """General-purpose test-and-set operation for mutable slots. Perform a bunch of comparisons against the existing shares. If they all pass, then apply a bunch of write vectors to those shares. 
Then use the read vectors to extract data from all the shares and return the data. This method is, um, large. The goal is to allow clients to update all the shares associated with a mutable file in a single round trip. @param storage_index: the index of the bucket to be created or increfed. @param write_enabler: a secret that is stored along with the slot. Writes are accepted from any caller who can present the matching secret. A different secret should be used for each slot*server pair. @param renew_secret: This is the secret used to protect bucket refresh This secret is generated by the client and stored for later comparison by the server. Each server is given a different secret. @param cancel_secret: Like renew_secret, but protects bucket decref. The 'secrets' argument is a tuple of (write_enabler, renew_secret, cancel_secret). The first is required to perform any write. The latter two are used when allocating new shares. To simply acquire a new lease on existing shares, use an empty testv and an empty writev. Each share can have a separate test vector (i.e. a list of comparisons to perform). If all vectors for all shares pass, then all writes for all shares are recorded. Each comparison is a 4-tuple of (offset, length, operator, specimen), which effectively does a bool( (read(offset, length)) OPERATOR specimen ) and only performs the write if all these evaluate to True. Basic test-and-set uses 'eq'. Write-if-newer uses a seqnum and (offset, length, 'lt', specimen). Write-if-same-or-newer uses 'le'. Reads from the end of the container are truncated, and missing shares behave like empty ones, so to assert that a share doesn't exist (for use when creating a new share), use (0, 1, 'eq', ''). The write vector will be applied to the given share, expanding it if necessary. A write vector applied to a share number that did not exist previously will cause that share to be created. Each write vector is accompanied by a 'new_length' argument. If new_length is not None, use it to set the size of the container. This can be used to pre-allocate space for a series of upcoming writes, or truncate existing data. If the container is growing, new_length will be applied before datav. If the container is shrinking, it will be applied afterwards. If new_length==0, the share will be deleted. The read vector is used to extract data from all known shares, *before* any writes have been applied. The same vector is used for all shares. This captures the state that was tested by the test vector. This method returns two values: a boolean and a dict. The boolean is True if the write vectors were applied, False if not. The dict is keyed by share number, and each value contains a list of strings, one for each element of the read vector. If the write_enabler is wrong, this will raise BadWriteEnablerError. To enable share migration (using update_write_enabler), the exception will have the nodeid used for the old write enabler embedded in it, in the following string:: The write enabler was recorded by nodeid '%s'. Note that the nodeid here is encoded using the same base32 encoding used by Foolscap and allmydata.util.idlib.nodeid_b2a(). """ return TupleOf(bool, DictOf(int, ReadData)) def advise_corrupt_share(share_type=str, storage_index=StorageIndex, shnum=int, reason=str): """Clients who discover hash failures in shares that they have downloaded from me will use this method to inform me about the failures. I will record their concern so that my operator can manually inspect the shares in question. I return None. 
'share_type' is either 'mutable' or 'immutable'. 'storage_index' is a (binary) storage index string, and 'shnum' is the integer share number. 'reason' is a human-readable explanation of the problem, probably including some expected hash values and the computed ones which did not match. Corruption advisories for mutable shares should include a hash of the public key (the same value that appears in the mutable-file verify-cap), since the current share format does not store that on disk. """ class IStorageBucketWriter(Interface): """ Objects of this kind live on the client side. """ def put_block(segmentnum=int, data=ShareData): """@param data: For most segments, this data will be 'blocksize' bytes in length. The last segment might be shorter. @return: a Deferred that fires (with None) when the operation completes """ def put_plaintext_hashes(hashes=ListOf(Hash)): """ @return: a Deferred that fires (with None) when the operation completes """ def put_crypttext_hashes(hashes=ListOf(Hash)): """ @return: a Deferred that fires (with None) when the operation completes """ def put_block_hashes(blockhashes=ListOf(Hash)): """ @return: a Deferred that fires (with None) when the operation completes """ def put_share_hashes(sharehashes=ListOf(TupleOf(int, Hash))): """ @return: a Deferred that fires (with None) when the operation completes """ def put_uri_extension(data=URIExtensionData): """This block of data contains integrity-checking information (hashes of plaintext, crypttext, and shares), as well as encoding parameters that are necessary to recover the data. This is a serialized dict mapping strings to other strings. The hash of this data is kept in the URI and verified before any of the data is used. All buckets for a given file contain identical copies of this data. The serialization format is specified with the following pseudocode: for k in sorted(dict.keys()): assert re.match(r'^[a-zA-Z_\-]+$', k) write(k + ':' + netstring(dict[k])) @return: a Deferred that fires (with None) when the operation completes """ def close(): """Finish writing and close the bucket. The share is not finalized until this method is called: if the uploading client disconnects before calling close(), the partially-written share will be discarded. @return: a Deferred that fires (with None) when the operation completes """ class IStorageBucketReader(Interface): def get_block_data(blocknum=int, blocksize=int, size=int): """Most blocks will be the same size. The last block might be shorter than the others. @return: ShareData """ def get_crypttext_hashes(): """ @return: ListOf(Hash) """ def get_block_hashes(at_least_these=SetOf(int)): """ @return: ListOf(Hash) """ def get_share_hashes(at_least_these=SetOf(int)): """ @return: ListOf(TupleOf(int, Hash)) """ def get_uri_extension(): """ @return: URIExtensionData """ class IStorageBroker(Interface): def get_servers_for_psi(peer_selection_index): """ @return: list of IServer instances """ def get_connected_servers(): """ @return: frozenset of connected IServer instances """ def get_known_servers(): """ @return: frozenset of IServer instances """ def get_all_serverids(): """ @return: frozenset of serverid strings """ def get_nickname_for_serverid(serverid): """ @return: unicode nickname, or None """ # methods moved from IntroducerClient, need review def get_all_connections(): """Return a frozenset of (nodeid, service_name, rref) tuples, one for each active connection we've established to a remote service. 
This is mostly useful for unit tests that need to wait until a certain number of connections have been made.""" def get_all_connectors(): """Return a dict that maps from (nodeid, service_name) to a RemoteServiceConnector instance for all services that we are actively trying to connect to. Each RemoteServiceConnector has the following public attributes:: service_name: the type of service provided, like 'storage' announcement_time: when we first heard about this service last_connect_time: when we last established a connection last_loss_time: when we last lost a connection version: the peer's version, from the most recent connection oldest_supported: the peer's oldest supported version, same rref: the RemoteReference, if connected, otherwise None remote_host: the IAddress, if connected, otherwise None This method is intended for monitoring interfaces, such as a web page which describes connecting and connected peers. """ def get_all_peerids(): """Return a frozenset of all peerids to whom we have a connection (to one or more services) established. Mostly useful for unit tests.""" def get_all_connections_for(service_name): """Return a frozenset of (nodeid, service_name, rref) tuples, one for each active connection that provides the given SERVICE_NAME.""" def get_permuted_peers(service_name, key): """Returns an ordered list of (peerid, rref) tuples, selecting from the connections that provide SERVICE_NAME, using a hash-based permutation keyed by KEY. This randomizes the service list in a repeatable way, to distribute load over many peers. """ class IURI(Interface): def init_from_string(uri): """Accept a string (as created by my to_string() method) and populate this instance with its data. I am not normally called directly, please use the module-level uri.from_string() function to convert arbitrary URI strings into IURI-providing instances.""" def is_readonly(): """Return False if this URI be used to modify the data. Return True if this URI cannot be used to modify the data.""" def is_mutable(): """Return True if the data can be modified by *somebody* (perhaps someone who has a more powerful URI than this one).""" # TODO: rename to get_read_cap() def get_readonly(): """Return another IURI instance, which represents a read-only form of this one. If is_readonly() is True, this returns self.""" def get_verify_cap(): """Return an instance that provides IVerifierURI, which can be used to check on the availability of the file or directory, without providing enough capabilities to actually read or modify the contents. This may return None if the file does not need checking or verification (e.g. LIT URIs). """ def to_string(): """Return a string of printable ASCII characters, suitable for passing into init_from_string.""" class IVerifierURI(Interface, IURI): def init_from_string(uri): """Accept a string (as created by my to_string() method) and populate this instance with its data. 
I am not normally called directly, please use the module-level uri.from_string() function to convert arbitrary URI strings into IURI-providing instances.""" def to_string(): """Return a string of printable ASCII characters, suitable for passing into init_from_string.""" class IDirnodeURI(Interface): """I am a URI which represents a dirnode.""" class IFileURI(Interface): """I am a URI which represents a filenode.""" def get_size(): """Return the length (in bytes) of the file that I represent.""" class IImmutableFileURI(IFileURI): pass class IMutableFileURI(Interface): """I am a URI which represents a mutable filenode.""" class IDirectoryURI(Interface): pass class IReadonlyDirectoryURI(Interface): pass class CapConstraintError(Exception): """A constraint on a cap was violated.""" class MustBeDeepImmutableError(CapConstraintError): """Mutable children cannot be added to an immutable directory. Also, caps obtained from an immutable directory can trigger this error if they are later found to refer to a mutable object and then used.""" class MustBeReadonlyError(CapConstraintError): """Known write caps cannot be specified in a ro_uri field. Also, caps obtained from a ro_uri field can trigger this error if they are later found to be write caps and then used.""" class MustNotBeUnknownRWError(CapConstraintError): """Cannot add an unknown child cap specified in a rw_uri field.""" # The hierarchy looks like this: # IFilesystemNode # IFileNode # IMutableFileNode # IImmutableFileNode # IDirectoryNode class IFilesystemNode(Interface): def get_cap(): """Return the strongest 'cap instance' associated with this node. (writecap for writeable-mutable files/directories, readcap for immutable or readonly-mutable files/directories). To convert this into a string, call .to_string() on the result.""" def get_readcap(): """Return a readonly cap instance for this node. For immutable or readonly nodes, get_cap() and get_readcap() return the same thing.""" def get_repair_cap(): """Return an IURI instance that can be used to repair the file, or None if this node cannot be repaired (either because it is not distributed, like a LIT file, or because the node does not represent sufficient authority to create a repair-cap, like a read-only RSA mutable file node [which cannot create the correct write-enablers]). """ def get_verify_cap(): """Return an IVerifierURI instance that represents the 'verifiy/refresh capability' for this node. The holder of this capability will be able to renew the lease for this node, protecting it from garbage-collection. They will also be able to ask a server if it holds a share for the file or directory. """ def get_uri(): """Return the URI string corresponding to the strongest cap associated with this node. If this node is read-only, the URI will only offer read-only access. If this node is read-write, the URI will offer read-write access. If you have read-write access to a node and wish to share merely read-only access with others, use get_readonly_uri(). """ def get_write_uri(n): """Return the URI string that can be used by others to get write access to this node, if it is writeable. If this is a read-only node, return None.""" def get_readonly_uri(): """Return the URI string that can be used by others to get read-only access to this node. The result is a read-only URI, regardless of whether this node is read-only or read-write. If you have merely read-only access to this node, get_readonly_uri() will return the same thing as get_uri(). 
""" def get_storage_index(): """Return a string with the (binary) storage index in use on this download. This may be None if there is no storage index (i.e. LIT files and directories).""" def is_readonly(): """Return True if this reference provides mutable access to the given file or directory (i.e. if you can modify it), or False if not. Note that even if this reference is read-only, someone else may hold a read-write reference to it.""" def is_mutable(): """Return True if this file or directory is mutable (by *somebody*, not necessarily you), False if it is is immutable. Note that a file might be mutable overall, but your reference to it might be read-only. On the other hand, all references to an immutable file will be read-only; there are no read-write references to an immutable file. """ def is_unknown(): """Return True if this is an unknown node.""" def is_allowed_in_immutable_directory(): """Return True if this node is allowed as a child of a deep-immutable directory. This is true if either the node is of a known-immutable type, or it is unknown and read-only. """ def raise_error(): """Raise any error associated with this node.""" def get_size(): """Return the length (in bytes) of the data this node represents. For directory nodes, I return the size of the backing store. I return synchronously and do not consult the network, so for mutable objects, I will return the most recently observed size for the object, or None if I don't remember a size. Use get_current_size, which returns a Deferred, if you want more up-to-date information.""" def get_current_size(): """I return a Deferred that fires with the length (in bytes) of the data this node represents. """ class IFileNode(IFilesystemNode): """I am a node which represents a file: a sequence of bytes. I am not a container, like IDirectoryNode.""" class IImmutableFileNode(IFileNode): def read(consumer, offset=0, size=None): """Download a portion (possibly all) of the file's contents, making them available to the given IConsumer. Return a Deferred that fires (with the consumer) when the consumer is unregistered (either because the last byte has been given to it, or because the consumer threw an exception during write(), possibly because it no longer wants to receive data). The portion downloaded will start at 'offset' and contain 'size' bytes (or the remainder of the file if size==None). The consumer will be used in non-streaming mode: an IPullProducer will be attached to it. The consumer will not receive data right away: several network trips must occur first. The order of events will be:: consumer.registerProducer(p, streaming) (if streaming == False):: consumer does p.resumeProducing() consumer.write(data) consumer does p.resumeProducing() consumer.write(data).. (repeat until all data is written) consumer.unregisterProducer() deferred.callback(consumer) If a download error occurs, or an exception is raised by consumer.registerProducer() or consumer.write(), I will call consumer.unregisterProducer() and then deliver the exception via deferred.errback(). To cancel the download, the consumer should call p.stopProducing(), which will result in an exception being delivered via deferred.errback(). See src/allmydata/util/consumer.py for an example of a simple download-to-memory consumer. """ class IMutableFileNode(IFileNode): """I provide access to a 'mutable file', which retains its identity regardless of what contents are put in it. 
The consistency-vs-availability problem means that there might be multiple versions of a file present in the grid, some of which might be unrecoverable (i.e. have fewer than 'k' shares). These versions are loosely ordered: each has a sequence number and a hash, and any version with seqnum=N was uploaded by a node which has seen at least one version with seqnum=N-1. The 'servermap' (an instance of IMutableFileServerMap) is used to describe the versions that are known to be present in the grid, and which servers are hosting their shares. It is used to represent the 'state of the world', and is used for this purpose by my test-and-set operations. Downloading the contents of the mutable file will also return a servermap. Uploading a new version into the mutable file requires a servermap as input, and the semantics of the replace operation is 'replace the file with my new version if it looks like nobody else has changed the file since my previous download'. Because the file is distributed, this is not a perfect test-and-set operation, but it will do its best. If the replace process sees evidence of a simultaneous write, it will signal an UncoordinatedWriteError, so that the caller can take corrective action. Most readers will want to use the 'best' current version of the file, and should use my 'download_best_version()' method. To unconditionally replace the file, callers should use overwrite(). This is the mode that user-visible mutable files will probably use. To apply some delta to the file, call modify() with a callable modifier function that can apply the modification that you want to make. This is the mode that dirnodes will use, since most directory modification operations can be expressed in terms of deltas to the directory state. Three methods are available for users who need to perform more complex operations. The first is get_servermap(), which returns an up-to-date servermap using a specified mode. The second is download_version(), which downloads a specific version (not necessarily the 'best' one). The third is 'upload', which accepts new contents and a servermap (which must have been updated with MODE_WRITE). The upload method will attempt to apply the new contents as long as no other node has modified the file since the servermap was updated. This might be useful to a caller who wants to merge multiple versions into a single new one. Note that each time the servermap is updated, a specific 'mode' is used, which determines how many peers are queried. To use a servermap for my replace() method, that servermap must have been updated in MODE_WRITE. These modes are defined in allmydata.mutable.common, and consist of MODE_READ, MODE_WRITE, MODE_ANYTHING, and MODE_CHECK. Please look in allmydata/mutable/servermap.py for details about the differences. Mutable files are currently limited in size (about 3.5MB max) and can only be retrieved and updated all-at-once, as a single big string. Future versions of our mutable files will remove this restriction. """ def download_best_version(): """Download the 'best' available version of the file, meaning one of the recoverable versions with the highest sequence number. If no uncoordinated writes have occurred, and if enough shares are available, then this will be the most recent version that has been uploaded. I update an internal servermap with MODE_READ, determine which version of the file is indicated by servermap.best_recoverable_version(), and return a Deferred that fires with its contents. 
If no version is recoverable, the Deferred will errback with UnrecoverableFileError. """ def get_size_of_best_version(): """Find the size of the version that would be downloaded with download_best_version(), without actually downloading the whole file. I return a Deferred that fires with an integer. """ def overwrite(new_contents): """Unconditionally replace the contents of the mutable file with new ones. This simply chains get_servermap(MODE_WRITE) and upload(). This is only appropriate to use when the new contents of the file are completely unrelated to the old ones, and you do not care about other clients' changes. I return a Deferred that fires (with a PublishStatus object) when the update has completed. """ def modify(modifier_cb): """Modify the contents of the file, by downloading the current version, applying the modifier function (or bound method), then uploading the new version. I return a Deferred that fires (with a PublishStatus object) when the update is complete. The modifier callable will be given three arguments: a string (with the old contents), a 'first_time' boolean, and a servermap. As with download_best_version(), the old contents will be from the best recoverable version, but the modifier can use the servermap to make other decisions (such as refusing to apply the delta if there are multiple parallel versions, or if there is evidence of a newer unrecoverable version). 'first_time' will be True the first time the modifier is called, and False on any subsequent calls. The callable should return a string with the new contents. The callable must be prepared to be called multiple times, and must examine the input string to see if the change that it wants to make is already present in the old version. If it does not need to make any changes, it can either return None, or return its input string. If the modifier raises an exception, it will be returned in the errback. """ def get_servermap(mode): """Return a Deferred that fires with an IMutableFileServerMap instance, updated using the given mode. """ def download_version(servermap, version): """Download a specific version of the file, using the servermap as a guide to where the shares are located. I return a Deferred that fires with the requested contents, or errbacks with UnrecoverableFileError. Note that a servermap which was updated with MODE_ANYTHING or MODE_READ may not know about shares for all versions (those modes stop querying servers as soon as they can fulfil their goals), so you may want to use MODE_CHECK (which checks everything) to get increased visibility. """ def upload(new_contents, servermap): """Replace the contents of the file with new ones. This requires a servermap that was previously updated with MODE_WRITE. I attempt to provide test-and-set semantics, in that I will avoid modifying any share that is different than the version I saw in the servermap. However, if another node is writing to the file at the same time as me, I may manage to update some shares while they update others. If I see any evidence of this, I will signal UncoordinatedWriteError, and the file will be left in an inconsistent state (possibly the version you provided, possibly the old version, possibly somebody else's version, and possibly a mix of shares from all of these). The recommended response to UncoordinatedWriteError is to either return it to the caller (since they failed to coordinate their writes), or to attempt some sort of recovery. 
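As a hedged sketch of the overall test-and-set flow, including one possible reaction to a collision (the retry policy shown is only an assumption about caller behaviour; 'new_contents' is a placeholder)::

  d = node.get_servermap(MODE_WRITE)
  d.addCallback(lambda smap: node.upload(new_contents, smap))
  def _collision(f):
      f.trap(UncoordinatedWriteError)
      # back off and retry, or hand the failure back to the caller
      # who failed to coordinate their writes
      return f
  d.addErrback(_collision)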
It may be sufficient to wait a random interval (with exponential backoff) and repeat your operation. If I do not signal UncoordinatedWriteError, then I was able to write the new version without incident. I return a Deferred that fires (with a PublishStatus object) when the publish has completed. I will update the servermap in-place with the location of all new shares. """ def get_writekey(): """Return this filenode's writekey, or None if the node does not have write-capability. This may be used to assist with data structures that need to make certain data available only to writers, such as the read-write child caps in dirnodes. The recommended process is to have reader-visible data be submitted to the filenode in the clear (where it will be encrypted by the filenode using the readkey), but encrypt writer-visible data using this writekey. """ class NotEnoughSharesError(Exception): """Download was unable to get enough shares""" class NoSharesError(Exception): """Download was unable to get any shares at all.""" class UploadUnhappinessError(Exception): """Upload was unable to satisfy 'servers_of_happiness'""" class UnableToFetchCriticalDownloadDataError(Exception): """I was unable to fetch some piece of critical data which is supposed to be identically present in all shares.""" class NoServersError(Exception): """Upload wasn't given any servers to work with, usually indicating a network or Introducer problem.""" class ExistingChildError(Exception): """A directory node was asked to add or replace a child that already exists, and overwrite= was set to False.""" class NoSuchChildError(Exception): """A directory node was asked to fetch a child which does not exist.""" class ChildOfWrongTypeError(Exception): """An operation was attempted on a child of the wrong type (file or directory).""" class IDirectoryNode(IFilesystemNode): """I represent a filesystem node that is a container, with a name-to-child mapping, holding the tahoe equivalent of a directory. All child names are unicode strings, and all children are some sort of IFilesystemNode (a file, subdirectory, or unknown node). """ def get_uri(): """ The dirnode ('1') URI returned by this method can be used in set_uri() on a different directory ('2') to 'mount' a reference to this directory ('1') under the other ('2'). This URI is just a string, so it can be passed around through email or other out-of-band protocol. """ def get_readonly_uri(): """ The dirnode ('1') URI returned by this method can be used in set_uri() on a different directory ('2') to 'mount' a reference to this directory ('1') under the other ('2'). This URI is just a string, so it can be passed around through email or other out-of-band protocol. """ def list(): """I return a Deferred that fires with a dictionary mapping child name (a unicode string) to (node, metadata_dict) tuples, in which 'node' is an IFilesystemNode and 'metadata_dict' is a dictionary of metadata.""" def has_child(name): """I return a Deferred that fires with a boolean, True if there exists a child of the given name, False if not. The child name must be a unicode string.""" def get(name): """I return a Deferred that fires with a specific named child node, which is an IFilesystemNode. The child name must be a unicode string. I raise NoSuchChildError if I do not have a child by that name.""" def get_metadata_for(name): """I return a Deferred that fires with the metadata dictionary for a specific named child node. The child name must be a unicode string. 
This metadata is stored in the *edge*, not in the child, so it is attached to the parent dirnode rather than the child node. I raise NoSuchChildError if I do not have a child by that name.""" def set_metadata_for(name, metadata): """I replace any existing metadata for the named child with the new metadata. The child name must be a unicode string. This metadata is stored in the *edge*, not in the child, so it is attached to the parent dirnode rather than the child node. I return a Deferred (that fires with this dirnode) when the operation is complete. I raise NoSuchChildError if I do not have a child by that name.""" def get_child_at_path(path): """Transform a child path into an IFilesystemNode. I perform a recursive series of 'get' operations to find the named descendant node. I return a Deferred that fires with the node, or errbacks with NoSuchChildError if the node could not be found. The path can be either a single string (slash-separated) or a list of path-name elements. All elements must be unicode strings. """ def get_child_and_metadata_at_path(path): """Transform a child path into an IFilesystemNode and metadata. I am like get_child_at_path(), but my Deferred fires with a tuple of (node, metadata). The metadata comes from the last edge. If the path is empty, the metadata will be an empty dictionary. """ def set_uri(name, writecap, readcap=None, metadata=None, overwrite=True): """I add a child (by writecap+readcap) at the specific name. I return a Deferred that fires when the operation finishes. If overwrite= is True, I will replace any existing child of the same name, otherwise an existing child will cause me to return ExistingChildError. The child name must be a unicode string. The child caps could be for a file, or for a directory. If you have both the writecap and readcap, you should provide both arguments. If you have only one cap and don't know whether it is read-only, provide it as the writecap argument and leave the readcap as None. If you have only one cap that is known to be read-only, provide it as the readcap argument and leave the writecap as None. The filecaps are typically obtained from an IFilesystemNode with get_uri() and get_readonly_uri(). If metadata= is provided, I will use it as the metadata for the named edge. This will replace any existing metadata. If metadata= is left as the default value of None, I will set ['mtime'] to the current time, and I will set ['ctime'] to the current time if there was not already a child by this name present. This roughly matches the ctime/mtime semantics of traditional filesystems. See the "About the metadata" section of webapi.txt for futher information. If this directory node is read-only, the Deferred will errback with a NotWriteableError.""" def set_children(entries, overwrite=True): """Add multiple children (by writecap+readcap) to a directory node. Takes a dictionary, with childname as keys and (writecap, readcap) tuples (or (writecap, readcap, metadata) triples) as values. Returns a Deferred that fires (with this dirnode) when the operation finishes. This is equivalent to calling set_uri() multiple times, but is much more efficient. All child names must be unicode strings. """ def set_node(name, child, metadata=None, overwrite=True): """I add a child at the specific name. I return a Deferred that fires when the operation finishes. This Deferred will fire with the child node that was just added. I will replace any existing child of the same name. The child name must be a unicode string. 
The 'child' instance must be an instance providing IFilesystemNode. If metadata= is provided, I will use it as the metadata for the named edge. This will replace any existing metadata. If metadata= is left as the default value of None, I will set ['mtime'] to the current time, and I will set ['ctime'] to the current time if there was not already a child by this name present. This roughly matches the ctime/mtime semantics of traditional filesystems. See the "About the metadata" section of webapi.txt for further information. If this directory node is read-only, the Deferred will errback with a NotWriteableError.""" def set_nodes(entries, overwrite=True): """Add multiple children to a directory node. Takes a dict mapping unicode childname to (child_node, metadata) tuples. If metadata=None, the original metadata is left unmodified. Returns a Deferred that fires (with this dirnode) when the operation finishes. This is equivalent to calling set_node() multiple times, but is much more efficient.""" def add_file(name, uploadable, metadata=None, overwrite=True): """I upload a file (using the given IUploadable), then attach the resulting ImmutableFileNode to the directory at the given name. I set metadata the same way as set_uri and set_node. The child name must be a unicode string. I return a Deferred that fires (with the IFileNode of the uploaded file) when the operation completes.""" def delete(name, must_exist=True, must_be_directory=False, must_be_file=False): """I remove the child at the specific name. I return a Deferred that fires when the operation finishes. The child name must be a unicode string. If must_exist is True and I do not have a child by that name, I raise NoSuchChildError. If must_be_directory is True and the child is a file, or if must_be_file is True and the child is a directory, I raise ChildOfWrongTypeError.""" def create_subdirectory(name, initial_children={}, overwrite=True, metadata=None): """I create and attach a directory at the given name. The new directory can be empty, or it can be populated with children according to 'initial_children', which takes a dictionary in the same format as set_nodes (i.e. mapping unicode child name to (childnode, metadata) tuples). The child name must be a unicode string. I return a Deferred that fires (with the new directory node) when the operation finishes.""" def move_child_to(current_child_name, new_parent, new_child_name=None, overwrite=True): """I take one of my children and move it to a new parent. The child is referenced by name. On the new parent, the child will live under 'new_child_name', which defaults to 'current_child_name'. TODO: what should we do about metadata? I return a Deferred that fires when the operation finishes. The child name must be a unicode string. I raise NoSuchChildError if I do not have a child by that name.""" def build_manifest(): """I generate a table of everything reachable from this directory. I also compute deep-stats as described below. I return a Monitor. The Monitor's results will be a dictionary with four elements: res['manifest']: a list of (path, cap) tuples for all nodes (directories and files) reachable from this one. 'path' will be a tuple of unicode strings. The origin dirnode will be represented by an empty path tuple. res['verifycaps']: a list of (printable) verifycap strings, one for each reachable non-LIT node. This is a set: it will contain no duplicates. res['storage-index']: a list of (base32) storage index strings, one for each reachable non-LIT node.
This is a set: it will contain no duplicates. res['stats']: a dictionary, the same that is generated by start_deep_stats() below. The Monitor will also have an .origin_si attribute with the (binary) storage index of the starting point. """ def start_deep_stats(): """Return a Monitor, examining all nodes (directories and files) reachable from this one. The Monitor's results will be a dictionary with the following keys:: count-immutable-files: count of how many CHK files are in the set count-mutable-files: same, for mutable files (does not include directories) count-literal-files: same, for LIT files count-files: sum of the above three count-directories: count of directories size-immutable-files: total bytes for all CHK files in the set size-mutable-files (TODO): same, for current version of all mutable files, does not include directories size-literal-files: same, for LIT files size-directories: size of mutable files used by directories largest-directory: number of bytes in the largest directory largest-directory-children: number of children in the largest directory largest-immutable-file: number of bytes in the largest CHK file size-mutable-files is not yet implemented, because it would involve even more queries than deep_stats does. The Monitor will also have an .origin_si attribute with the (binary) storage index of the starting point. This operation will visit every directory node underneath this one, and can take a long time to run. On a typical workstation with good bandwidth, this can examine roughly 15 directories per second (and takes several minutes of 100% CPU for ~1700 directories). """ class ICodecEncoder(Interface): def set_params(data_size, required_shares, max_shares): """Set up the parameters of this encoder. This prepares the encoder to perform an operation that converts a single block of data into a number of shares, such that a future ICodecDecoder can use a subset of these shares to recover the original data. This operation is invoked by calling encode(). Once the encoding parameters are set up, the encode operation can be invoked multiple times. set_params() prepares the encoder to accept blocks of input data that are exactly 'data_size' bytes in length. The encoder will be prepared to produce 'max_shares' shares for each encode() operation (although see the 'desired_share_ids' to use less CPU). The encoding math will be chosen such that the decoder can get by with as few as 'required_shares' of these shares and still reproduce the original data. For example, set_params(1000, 5, 5) offers no redundancy at all, whereas set_params(1000, 1, 10) provides 10x redundancy. Numerical Restrictions: 'data_size' is required to be an integral multiple of 'required_shares'. In general, the caller should choose required_shares and max_shares based upon their reliability requirements and the number of peers available (the total storage space used is roughly equal to max_shares*data_size/required_shares), then choose data_size to achieve the memory footprint desired (larger data_size means more efficient operation, smaller data_size means smaller memory footprint). In addition, 'max_shares' must be equal to or greater than 'required_shares'. Of course, setting them to be equal causes encode() to degenerate into a particularly slow form of the 'split' utility. See encode() for more details about how these parameters are used. set_params() must be called before any other ICodecEncoder methods may be invoked. 
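For instance, a sketch (the encoder class name is a placeholder for any concrete ICodecEncoder implementation)::

  enc = SomeCodecEncoder()
  enc.set_params(3000, 3, 10)  # data_size=3000 is a multiple of required_shares=3;
                               # any 3 of the 10 shares can reconstruct the data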
""" def get_params(): """Return the 3-tuple of data_size, required_shares, max_shares""" def get_encoder_type(): """Return a short string that describes the type of this encoder. There is required to be a global table of encoder classes. This method returns an index into this table; the value at this index is an encoder class, and this encoder is an instance of that class. """ def get_block_size(): """Return the length of the shares that encode() will produce. """ def encode_proposal(data, desired_share_ids=None): """Encode some data. 'data' must be a string (or other buffer object), and len(data) must be equal to the 'data_size' value passed earlier to set_params(). This will return a Deferred that will fire with two lists. The first is a list of shares, each of which is a string (or other buffer object) such that len(share) is the same as what get_share_size() returned earlier. The second is a list of shareids, in which each is an integer. The lengths of the two lists will always be equal to each other. The user should take care to keep each share closely associated with its shareid, as one is useless without the other. The length of this output list will normally be the same as the value provided to the 'max_shares' parameter of set_params(). This may be different if 'desired_share_ids' is provided. 'desired_share_ids', if provided, is required to be a sequence of ints, each of which is required to be >= 0 and < max_shares. If not provided, encode() will produce 'max_shares' shares, as if 'desired_share_ids' were set to range(max_shares). You might use this if you initially thought you were going to use 10 peers, started encoding, and then two of the peers dropped out: you could use desired_share_ids= to skip the work (both memory and CPU) of producing shares for the peers which are no longer available. """ def encode(inshares, desired_share_ids=None): """Encode some data. This may be called multiple times. Each call is independent. inshares is a sequence of length required_shares, containing buffers (i.e. strings), where each buffer contains the next contiguous non-overlapping segment of the input data. Each buffer is required to be the same length, and the sum of the lengths of the buffers is required to be exactly the data_size promised by set_params(). (This implies that the data has to be padded before being passed to encode(), unless of course it already happens to be an even multiple of required_shares in length.) Note: the requirement to break up your data into 'required_shares' chunks of exactly the right length before calling encode() is surprising from point of view of a user who doesn't know how FEC works. It feels like an implementation detail that has leaked outside the abstraction barrier. Is there a use case in which the data to be encoded might already be available in pre-segmented chunks, such that it is faster or less work to make encode() take a list rather than splitting a single string? Yes, there is: suppose you are uploading a file with K=64, N=128, segsize=262,144. Then each in-share will be of size 4096. If you use this .encode() API then your code could first read each successive 4096-byte chunk from the file and store each one in a Python string and store each such Python string in a Python list. Then you could call .encode(), passing that list as "inshares". 
The encoder would generate the other 64 "secondary shares" and return to you a new list containing references to the same 64 Python strings that you passed in (as the primary shares) plus references to the new 64 Python strings. (You could even imagine that your code could use readv() so that the operating system can arrange to get all of those bytes copied from the file into the Python list of Python strings as efficiently as possible instead of having a loop written in C or in Python to copy the next part of the file into the next string.) On the other hand if you instead use the .encode_proposal() API (above), then your code can first read in all of the 262,144 bytes of the segment from the file into a Python string, then call .encode_proposal() passing the segment data as the "data" argument. The encoder would basically first split the "data" argument into a list of 64 in-shares of 4096 byte each, and then do the same thing that .encode() does. So this would result in a little bit more copying of data and a little bit higher of a "maximum memory usage" during the process, although it might or might not make a practical difference for our current use cases. Note that "inshares" is a strange name for the parameter if you think of the parameter as being just for feeding in data to the codec. It makes more sense if you think of the result of this encoding as being the set of shares from inshares plus an extra set of "secondary shares" (or "check shares"). It is a surprising name! If the API is going to be surprising then the name should be surprising. If we switch to encode_proposal() above then we should also switch to an unsurprising name. 'desired_share_ids', if provided, is required to be a sequence of ints, each of which is required to be >= 0 and < max_shares. If not provided, encode() will produce 'max_shares' shares, as if 'desired_share_ids' were set to range(max_shares). You might use this if you initially thought you were going to use 10 peers, started encoding, and then two of the peers dropped out: you could use desired_share_ids= to skip the work (both memory and CPU) of producing shares for the peers which are no longer available. For each call, encode() will return a Deferred that fires with two lists, one containing shares and the other containing the shareids. The get_share_size() method can be used to determine the length of the share strings returned by encode(). Each shareid is a small integer, exactly as passed into 'desired_share_ids' (or range(max_shares), if desired_share_ids was not provided). The shares and their corresponding shareids are required to be kept together during storage and retrieval. Specifically, the share data is useless by itself: the decoder needs to be told which share is which by providing it with both the shareid and the actual share data. This function will allocate an amount of memory roughly equal to:: (max_shares - required_shares) * get_share_size() When combined with the memory that the caller must allocate to provide the input data, this leads to a memory footprint roughly equal to the size of the resulting encoded shares (i.e. the expansion factor times the size of the input segment). """ # rejected ideas: # # returning a list of (shareidN,shareN) tuples instead of a pair of # lists (shareids..,shares..). 
Brian thought the tuples would # encourage users to keep the share and shareid together throughout # later processing, Zooko pointed out that the code to iterate # through two lists is not really more complicated than using a list # of tuples and there's also a performance improvement # # having 'data_size' not required to be an integral multiple of # 'required_shares'. Doing this would require encode() to perform # padding internally, and we'd prefer to have any padding be done # explicitly by the caller. Yes, it is an abstraction leak, but # hopefully not an onerous one. class ICodecDecoder(Interface): def set_params(data_size, required_shares, max_shares): """Set the params. They have to be exactly the same ones that were used for encoding.""" def get_needed_shares(): """Return the number of shares needed to reconstruct the data. set_params() is required to be called before this.""" def decode(some_shares, their_shareids): """Decode a partial list of shares into data. 'some_shares' is required to be a sequence of buffers of sharedata, a subset of the shares returned by ICodecEncoder.encode(). Each share is required to be of the same length. The i'th element of their_shareids is required to be the shareid of the i'th buffer in some_shares. This returns a Deferred which fires with a sequence of buffers. This sequence will contain all of the segments of the original data, in order. The sum of the lengths of all of the buffers will be the 'data_size' value passed into the original ICodecEncoder.set_params() call. To get back the single original input block of data, use ''.join(output_buffers), or you may wish to simply write them in order to an output file. Note that some of the elements in the result sequence may be references to the elements of the some_shares input sequence. In particular, this means that if those share objects are mutable (e.g. arrays) and if they are changed, then both the input (the 'some_shares' parameter) and the output (the value given when the deferred is triggered) will change. The length of 'some_shares' is required to be exactly the value of 'required_shares' passed into the original ICodecEncoder.set_params() call. """ class IEncoder(Interface): """I take an object that provides IEncryptedUploadable, which provides encrypted data, and a list of shareholders. I then encode, hash, and deliver shares to those shareholders. I will compute all the necessary Merkle hash trees that are necessary to validate the crypttext that eventually comes back from the shareholders. I provide the URI Extension Block Hash, and the encoding parameters, both of which must be included in the URI. I do not choose shareholders, that is left to the IUploader. I must be given a dict of RemoteReferences to storage buckets that are ready and willing to receive data. """ def set_size(size): """Specify the number of bytes that will be encoded. This must be performed before get_serialized_params() can be called. """ def set_params(params): """Override the default encoding parameters. 'params' is a tuple of (k,d,n), where 'k' is the number of required shares, 'd' is the servers_of_happiness, and 'n' is the total number of shares that will be created. Encoding parameters can be set in three ways. 1: The Encoder class provides defaults (3/7/10). 2: the Encoder can be constructed with an 'options' dictionary, in which the 'needed_and_happy_and_total_shares' key can be a (k,d,n) tuple. 3: set_params((k,d,n)) can be called.
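For example (a sketch only)::

  encoder.set_params((3, 7, 10))  # k=3 shares needed, happiness=7, n=10 shares total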
If you intend to use set_params(), you must call it before get_share_size or get_param are called. """ def set_encrypted_uploadable(u): """Provide a source of encrypted upload data. 'u' must implement IEncryptedUploadable. When this is called, the IEncryptedUploadable will be queried for its length and the storage_index that should be used. This returns a Deferred that fires with this Encoder instance. This must be performed before start() can be called. """ def get_param(name): """Return an encoding parameter, by name. 'storage_index': return a string with the (16-byte truncated SHA-256 hash) storage index to which these shares should be pushed. 'share_counts': return a tuple describing how many shares are used: (needed_shares, servers_of_happiness, total_shares) 'num_segments': return an int with the number of segments that will be encoded. 'segment_size': return an int with the size of each segment. 'block_size': return the size of the individual blocks that will be delivered to a shareholder's put_block() method. By knowing this, the shareholder will be able to keep all blocks in a single file and still provide random access when reading them. # TODO: can we avoid exposing this? 'share_size': an int with the size of the data that will be stored on each shareholder. This is aggregate amount of data that will be sent to the shareholder, summed over all the put_block() calls I will ever make. It is useful to determine this size before asking potential shareholders whether they will grant a lease or not, since their answers will depend upon how much space we need. TODO: this might also include some amount of overhead, like the size of all the hashes. We need to decide whether this is useful or not. 'serialized_params': a string with a concise description of the codec name and its parameters. This may be passed into the IUploadable to let it make sure that the same file encoded with different parameters will result in different storage indexes. Once this is called, set_size() and set_params() may not be called. """ def set_shareholders(shareholders, servermap): """Tell the encoder where to put the encoded shares. 'shareholders' must be a dictionary that maps share number (an integer ranging from 0 to n-1) to an instance that provides IStorageBucketWriter. 'servermap' is a dictionary that maps share number (as defined above) to a set of peerids. This must be performed before start() can be called.""" def start(): """Begin the encode/upload process. This involves reading encrypted data from the IEncryptedUploadable, encoding it, uploading the shares to the shareholders, then sending the hash trees. set_encrypted_uploadable() and set_shareholders() must be called before this can be invoked. This returns a Deferred that fires with a verify cap when the upload process is complete. The verifycap, plus the encryption key, is sufficient to construct the read cap. """ class IDecoder(Interface): """I take a list of shareholders and some setup information, then download, validate, decode, and decrypt data from them, writing the results to an output file. I do not locate the shareholders, that is left to the IDownloader. I must be given a dict of RemoteReferences to storage buckets that are ready to send data. """ def setup(outfile): """I take a file-like object (providing write and close) to which all the plaintext data will be written. TODO: producer/consumer . Maybe write() should return a Deferred that indicates when it will accept more data? 
But probably having the IDecoder be a producer is easier to glue to IConsumer pieces. """ def set_shareholders(shareholders): """I take a dictionary that maps share identifiers (small integers) to RemoteReferences that provide RIBucketReader. This must be called before start().""" def start(): """I start the download. This process involves retrieving data and hash chains from the shareholders, using the hashes to validate the data, decoding the shares into segments, decrypting the segments, then writing the resulting plaintext to the output file. I return a Deferred that will fire (with self) when the download is complete. """ class IDownloadTarget(Interface): # Note that if the IDownloadTarget is also an IConsumer, the downloader # will register itself as a producer. This allows the target to invoke # downloader.pauseProducing, resumeProducing, and stopProducing. def open(size): """Called before any calls to write() or close(). If an error occurs before any data is available, fail() may be called without a previous call to open(). 'size' is the length of the file being downloaded, in bytes.""" def write(data): """Output some data to the target.""" def close(): """Inform the target that there is no more data to be written.""" def fail(why): """fail() is called to indicate that the download has failed. 'why' is a Failure object indicating what went wrong. No further methods will be invoked on the IDownloadTarget after fail().""" def register_canceller(cb): """The CiphertextDownloader uses this to register a no-argument function that the target can call to cancel the download. Once this canceller is invoked, no further calls to write() or close() will be made.""" def finish(): """When the CiphertextDownloader is done, this finish() function will be called. Whatever it returns will be returned to the invoker of Downloader.download. """ class IDownloader(Interface): def download(uri, target): """Perform a CHK download, sending the data to the given target. 'target' must provide IDownloadTarget. Returns a Deferred that fires (with the results of target.finish) when the download is finished, or errbacks if something went wrong.""" class IEncryptedUploadable(Interface): def set_upload_status(upload_status): """Provide an IUploadStatus object that should be filled with status information. The IEncryptedUploadable is responsible for setting key-determination progress ('chk'), size, storage_index, and ciphertext-fetch progress. It may delegate some of this responsibility to others, in particular to the IUploadable.""" def get_size(): """This behaves just like IUploadable.get_size().""" def get_all_encoding_parameters(): """Return a Deferred that fires with a tuple of (k,happy,n,segment_size). The segment_size will be used as-is, and must match the following constraints: it must be a multiple of k, and it shouldn't be unreasonably larger than the file size (if segment_size is larger than filesize, the difference must be stored as padding). This usually passes through to the IUploadable method of the same name. The encoder strictly obeys the values returned by this method. To make an upload use non-default encoding parameters, you must arrange to control the values that this method returns. """ def get_storage_index(): """Return a Deferred that fires with a 16-byte storage index. """ def read_encrypted(length, hash_only): """This behaves just like IUploadable.read(), but returns crypttext instead of plaintext. 
If hash_only is True, then this discards the data (and returns an empty list); this improves efficiency when resuming an interrupted upload (where we need to compute the plaintext hashes, but don't need the redundant encrypted data).""" def get_plaintext_hashtree_leaves(first, last, num_segments): """OBSOLETE; Get the leaf nodes of a merkle hash tree over the plaintext segments, i.e. get the tagged hashes of the given segments. The segment size is expected to be generated by the IEncryptedUploadable before any plaintext is read or ciphertext produced, so that the segment hashes can be generated with only a single pass. This returns a Deferred which fires with a sequence of hashes, using: tuple(segment_hashes[first:last]) 'num_segments' is used to assert that the number of segments that the IEncryptedUploadable handled matches the number of segments that the encoder was expecting. This method must not be called until the final byte has been read from read_encrypted(). Once this method is called, read_encrypted() can never be called again. """ def get_plaintext_hash(): """OBSOLETE; Get the hash of the whole plaintext. This returns a Deferred which fires with a tagged SHA-256 hash of the whole plaintext, obtained from hashutil.plaintext_hash(data). """ def close(): """Just like IUploadable.close().""" class IUploadable(Interface): def set_upload_status(upload_status): """Provide an IUploadStatus object that should be filled with status information. The IUploadable is responsible for setting key-determination progress ('chk').""" def set_default_encoding_parameters(params): """Set the default encoding parameters, which must be a dict mapping strings to ints. The meaningful keys are 'k', 'happy', 'n', and 'max_segment_size'. These might have an influence on the final encoding parameters returned by get_all_encoding_parameters(), if the Uploadable doesn't have more specific preferences. This call is optional: if it is not used, the Uploadable will use some built-in defaults. If used, this method must be called before any other IUploadable methods to have any effect. """ def get_size(): """Return a Deferred that will fire with the length of the data to be uploaded, in bytes. This will be called before the data is actually used, to compute encoding parameters. """ def get_all_encoding_parameters(): """Return a Deferred that fires with a tuple of (k,happy,n,segment_size). The segment_size will be used as-is, and must match the following constraints: it must be a multiple of k, and it shouldn't be unreasonably larger than the file size (if segment_size is larger than filesize, the difference must be stored as padding). The relative values of k and n allow some IUploadables to request better redundancy than others (in exchange for consuming more space in the grid). Larger values of segment_size reduce hash overhead, while smaller values reduce memory footprint and cause data to be delivered in smaller pieces (which may provide a smoother and more predictable download experience). The encoder strictly obeys the values returned by this method. To make an upload use non-default encoding parameters, you must arrange to control the values that this method returns. One way to influence them may be to call set_encoding_parameters() before calling get_all_encoding_parameters(). """ def get_encryption_key(): """Return a Deferred that fires with a 16-byte AES key. This key will be used to encrypt the data. The key will also be hashed to derive the StorageIndex. 
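A sketch of the simplest, non-convergent implementation (assuming os and twisted.internet.defer are available)::

  def get_encryption_key(self):
      return defer.succeed(os.urandom(16))  # strongly-random, no convergence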
Uploadables which want to achieve convergence should hash their file contents and the serialized_encoding_parameters to form the key (which of course requires a full pass over the data). Uploadables can use the upload.ConvergentUploadMixin class to achieve this automatically. Uploadables which do not care about convergence (or do not wish to make multiple passes over the data) can simply return a strongly-random 16 byte string. get_encryption_key() may be called multiple times: the IUploadable is required to return the same value each time. """ def read(length): """Return a Deferred that fires with a list of strings (perhaps with only a single element) which, when concatenated together, contain the next 'length' bytes of data. If EOF is near, this may provide fewer than 'length' bytes. The total number of bytes provided by read() before it signals EOF must equal the size provided by get_size(). If the data must be acquired through multiple internal read operations, returning a list instead of a single string may help to reduce string copies. However, the length of the concatenated strings must equal the amount of data requested, unless EOF is encountered. Long reads, or short reads without EOF, are not allowed. read() should return the same amount of data as a local disk file read, just in a different shape and asynchronously. 'length' will typically be equal to (min(get_size(),1MB)/req_shares), so a 10kB file means length=3kB, 100kB file means length=30kB, and >=1MB file means length=300kB. This method provides for a single full pass through the data. Later use cases may desire multiple passes or access to only parts of the data (such as a mutable file making small edits-in-place). This API will be expanded once those use cases are better understood. """ def close(): """The upload is finished, and whatever filehandle was in use may be closed.""" class IUploadResults(Interface): """I am returned by upload() methods. I contain a number of public attributes which can be read to determine the results of the upload. Some of these are functional, some are timing information. All of these may be None. .file_size : the size of the file, in bytes .uri : the CHK read-cap for the file .ciphertext_fetched : how many bytes were fetched by the helper .sharemap: dict mapping share identifier to set of serverids (binary strings). This indicates which servers were given which shares. For immutable files, the shareid is an integer (the share number, from 0 to N-1). For mutable files, it is a string of the form 'seq%d-%s-sh%d', containing the sequence number, the roothash, and the share number. .servermap : dict mapping server peerid to a set of share numbers .timings : dict of timing information, mapping name to seconds (float) total : total upload time, start to finish storage_index : time to compute the storage index peer_selection : time to decide which peers will be used contacting_helper : initial helper query to upload/no-upload decision existence_check : helper pre-upload existence check helper_total : initial helper query to helper finished pushing cumulative_fetch : helper waiting for ciphertext requests total_fetch : helper start to last ciphertext response cumulative_encoding : just time spent in zfec cumulative_sending : just time spent waiting for storage servers hashes_and_close : last segment push to shareholder close total_encode_and_push : first encode to shareholder close """ class IDownloadResults(Interface): """I am created internally by download() methods. 
I contain a number of public attributes which contain details about the download process.:: .file_size : the size of the file, in bytes .servers_used : set of server peerids that were used during download .server_problems : dict mapping server peerid to a problem string. Only servers that had problems (bad hashes, disconnects) are listed here. .servermap : dict mapping server peerid to a set of share numbers. Only servers that had any shares are listed here. .timings : dict of timing information, mapping name to seconds (float) peer_selection : time to ask servers about shares servers_peer_selection : dict of peerid to DYHB-query time uri_extension : time to fetch a copy of the URI extension block hashtrees : time to fetch the hash trees segments : time to fetch, decode, and deliver segments cumulative_fetch : time spent waiting for storage servers cumulative_decode : just time spent in zfec cumulative_decrypt : just time spent in decryption total : total download time, start to finish fetch_per_server : dict of peerid to list of per-segment fetch times """ class IUploader(Interface): def upload(uploadable): """Upload the file. 'uploadable' must implement IUploadable. This returns a Deferred which fires with an IUploadResults instance, from which the URI of the file can be obtained as results.uri .""" def upload_ssk(write_capability, new_version, uploadable): """TODO: how should this work?""" class ICheckable(Interface): def check(monitor, verify=False, add_lease=False): """Check up on my health, optionally repairing any problems. This returns a Deferred that fires with an instance that provides ICheckResults, or None if the object is non-distributed (i.e. LIT files). The monitor will be checked periodically to see if the operation has been cancelled. If so, no new queries will be sent, and the Deferred will fire (with an OperationCancelledError) immediately. Filenodes and dirnodes (which provide IFilesystemNode) are also checkable. Instances that represent verifier-caps will be checkable but not downloadable. Some objects (like LIT files) do not actually live in the grid, and their checkers return None (non-distributed files are always healthy). If verify=False, a relatively lightweight check will be performed: I will ask all servers if they have a share for me, and I will believe whatever they say. If there are at least N distinct shares on the grid, my results will indicate r.is_healthy()==True. This requires a roundtrip to each server, but does not transfer very much data, so the network bandwidth is fairly low. If verify=True, a more resource-intensive check will be performed: every share will be downloaded, and the hashes will be validated on every bit. I will ignore any shares that failed their hash checks. If there are at least N distinct valid shares on the grid, my results will indicate r.is_healthy()==True. This requires N/k times as much download bandwidth (and server disk IO) as a regular download. If a storage server is holding a corrupt share, or is experiencing memory failures during retrieval, or is malicious or buggy, then verification will detect the problem, but checking will not. If add_lease=True, I will ensure that an up-to-date lease is present on each share. The lease secrets will be derived from my node secret (in BASEDIR/private/secret), so either I will add a new lease to the share, or I will merely renew the lease that I already had.
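A hedged sketch of a typical lightweight check with lease renewal (the Monitor construction and the handling of the result are illustrative only)::

  d = node.check(Monitor(), verify=False, add_lease=True)
  def _report(cr):
      if cr is None:
          return 'non-distributed object; always healthy'
      return cr.get_summary()
  d.addCallback(_report)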
In a future version of the storage-server protocol (once Accounting has been implemented), there may be additional options here to define the kind of lease that is obtained (which account number to claim, etc). TODO: any problems seen during checking will be reported to the health-manager.furl, a centralized object which is responsible for figuring out why files are unhealthy so corrective action can be taken. """ def check_and_repair(monitor, verify=False, add_lease=False): """Like check(), but if the file/directory is not healthy, attempt to repair the damage. Any non-healthy result will cause an immediate repair operation, to generate and upload new shares. After repair, the file will be as healthy as we can make it. Details about what sort of repair is done will be put in the check-and-repair results. The Deferred will not fire until the repair is complete. This returns a Deferred which fires with an instance of ICheckAndRepairResults.""" class IDeepCheckable(Interface): def start_deep_check(verify=False, add_lease=False): """Check upon the health of me and everything I can reach. This is a recursive form of check(), useable only on dirnodes. I return a Monitor, with results that are an IDeepCheckResults object. TODO: If any of the directories I traverse are unrecoverable, the Monitor will report failure. If any of the files I check upon are unrecoverable, those problems will be reported in the IDeepCheckResults as usual, and the Monitor will not report a failure. """ def start_deep_check_and_repair(verify=False, add_lease=False): """Check upon the health of me and everything I can reach. Repair anything that isn't healthy. This is a recursive form of check_and_repair(), useable only on dirnodes. I return a Monitor, with results that are an IDeepCheckAndRepairResults object. TODO: If any of the directories I traverse are unrecoverable, the Monitor will report failure. If any of the files I check upon are unrecoverable, those problems will be reported in the IDeepCheckResults as usual, and the Monitor will not report a failure. """ class ICheckResults(Interface): """I contain the detailed results of a check/verify operation. """ def get_storage_index(): """Return a string with the (binary) storage index.""" def get_storage_index_string(): """Return a string with the (printable) abbreviated storage index.""" def get_uri(): """Return the (string) URI of the object that was checked.""" def is_healthy(): """Return a boolean, True if the file/dir is fully healthy, False if it is damaged in any way. Non-distributed LIT files always return True.""" def is_recoverable(): """Return a boolean, True if the file/dir can be recovered, False if not. Unrecoverable files are obviously unhealthy. Non-distributed LIT files always return True.""" def needs_rebalancing(): """Return a boolean, True if the file/dir's reliability could be improved by moving shares to new servers. Non-distributed LIT files always return False.""" def get_data(): """Return a dictionary that describes the state of the file/dir. LIT files always return an empty dictionary. 
Normal files and directories return a dictionary with the following keys (note that these use binary strings rather than base32-encoded ones) (also note that for mutable files, these counts are for the 'best' version): count-shares-good: the number of distinct good shares that were found count-shares-needed: 'k', the number of shares required for recovery count-shares-expected: 'N', the number of total shares generated count-good-share-hosts: the number of distinct storage servers with good shares. If this number is less than count-shares-good, then some shares are doubled up, increasing the correlation of failures. This indicates that one or more shares should be moved to an otherwise unused server, if one is available. count-corrupt-shares: the number of shares with integrity failures list-corrupt-shares: a list of 'share locators', one for each share that was found to be corrupt. Each share locator is a list of (serverid, storage_index, sharenum). count-incompatible-shares: the number of shares which are of a share format unknown to this checker list-incompatible-shares: a list of 'share locators', one for each share that was found to be of an unknown format. Each share locator is a list of (serverid, storage_index, sharenum). servers-responding: list of (binary) storage server identifiers, one for each server which responded to the share query (even if they said they didn't have shares, and even if they said they did have shares but then didn't send them when asked, or dropped the connection, or returned a Failure, and even if they said they did have shares and sent incorrect ones when asked) sharemap: dict mapping share identifier to list of serverids (binary strings). This indicates which servers are holding which shares. For immutable files, the shareid is an integer (the share number, from 0 to N-1). For mutable files, it is a string of the form 'seq%d-%s-sh%d', containing the sequence number, the roothash, and the share number. The following keys are most relevant for mutable files, but immutable files will provide sensible values too:: count-wrong-shares: the number of shares for versions other than the 'best' one (which is defined as being the recoverable version with the highest sequence number, then the highest roothash). These are either leftover shares from an older version (perhaps on a server that was offline when an update occurred), shares from an unrecoverable newer version, or shares from an alternate current version that results from an uncoordinated write collision. For a healthy file, this will equal 0. count-recoverable-versions: the number of recoverable versions of the file. For a healthy file, this will equal 1. count-unrecoverable-versions: the number of unrecoverable versions of the file. For a healthy file, this will be 0. """ def get_summary(): """Return a string with a brief (one-line) summary of the results.""" def get_report(): """Return a list of strings with more detailed results.""" class ICheckAndRepairResults(Interface): """I contain the detailed results of a check/verify/repair operation. The IFilesystemNode.check()/verify()/repair() methods all return instances that provide ICheckAndRepairResults. """ def get_storage_index(): """Return a string with the (binary) storage index.""" def get_storage_index_string(): """Return a string with the (printable) abbreviated storage index.""" def get_repair_attempted(): """Return a boolean, True if a repair was attempted. We might not attempt to repair the file because it was healthy, or healthy enough (i.e. 
some shares were missing but not enough to exceed some threshold), or because we don't know how to repair this object.""" def get_repair_successful(): """Return a boolean, True if repair was attempted and the file/dir was fully healthy afterwards. False if no repair was attempted or if a repair attempt failed.""" def get_pre_repair_results(): """Return an ICheckResults instance that describes the state of the file/dir before any repair was attempted.""" def get_post_repair_results(): """Return an ICheckResults instance that describes the state of the file/dir after any repair was attempted. If no repair was attempted, the pre-repair and post-repair results will be identical.""" class IDeepCheckResults(Interface): """I contain the results of a deep-check operation. This is returned by a call to ICheckable.deep_check(). """ def get_root_storage_index_string(): """Return the storage index (abbreviated human-readable string) of the first object checked.""" def get_counters(): """Return a dictionary with the following keys:: count-objects-checked: count of how many objects were checked count-objects-healthy: how many of those objects were completely healthy count-objects-unhealthy: how many were damaged in some way count-objects-unrecoverable: how many were unrecoverable count-corrupt-shares: how many shares were found to have corruption, summed over all objects examined """ def get_corrupt_shares(): """Return a set of (serverid, storage_index, sharenum) for all shares that were found to be corrupt. Both serverid and storage_index are binary. """ def get_all_results(): """Return a dictionary mapping pathname (a tuple of strings, ready to be slash-joined) to an ICheckResults instance, one for each object that was checked.""" def get_results_for_storage_index(storage_index): """Retrive the ICheckResults instance for the given (binary) storage index. Raises KeyError if there are no results for that storage index.""" def get_stats(): """Return a dictionary with the same keys as IDirectoryNode.deep_stats().""" class IDeepCheckAndRepairResults(Interface): """I contain the results of a deep-check-and-repair operation. This is returned by a call to ICheckable.deep_check_and_repair(). """ def get_root_storage_index_string(): """Return the storage index (abbreviated human-readable string) of the first object checked.""" def get_counters(): """Return a dictionary with the following keys:: count-objects-checked: count of how many objects were checked count-objects-healthy-pre-repair: how many of those objects were completely healthy (before any repair) count-objects-unhealthy-pre-repair: how many were damaged in some way count-objects-unrecoverable-pre-repair: how many were unrecoverable count-objects-healthy-post-repair: how many of those objects were completely healthy (after any repair) count-objects-unhealthy-post-repair: how many were damaged in some way count-objects-unrecoverable-post-repair: how many were unrecoverable count-repairs-attempted: repairs were attempted on this many objects. The count-repairs- keys will always be provided, however unless repair=true is present, they will all be zero. 
count-repairs-successful: how many repairs resulted in healthy objects count-repairs-unsuccessful: how many repairs did not result in completely healthy objects count-corrupt-shares-pre-repair: how many shares were found to have corruption, summed over all objects examined (before any repair) count-corrupt-shares-post-repair: how many shares were found to have corruption, summed over all objects examined (after any repair) """ def get_stats(): """Return a dictionary with the same keys as IDirectoryNode.deep_stats().""" def get_corrupt_shares(): """Return a set of (serverid, storage_index, sharenum) for all shares that were found to be corrupt before any repair was attempted. Both serverid and storage_index are binary. """ def get_remaining_corrupt_shares(): """Return a set of (serverid, storage_index, sharenum) for all shares that were found to be corrupt after any repair was completed. Both serverid and storage_index are binary. These are shares that need manual inspection and probably deletion. """ def get_all_results(): """Return a dictionary mapping pathname (a tuple of strings, ready to be slash-joined) to an ICheckAndRepairResults instance, one for each object that was checked.""" def get_results_for_storage_index(storage_index): """Retrieve the ICheckAndRepairResults instance for the given (binary) storage index. Raises KeyError if there are no results for that storage index.""" class IRepairable(Interface): def repair(check_results): """Attempt to repair the given object. Returns a Deferred that fires with an IRepairResults object. I must be called with an object that implements ICheckResults, as proof that you have actually discovered a problem with this file. I will use the data in the checker results to guide the repair process, such as which servers provided bad data and should therefore be avoided. The ICheckResults object is inside the ICheckAndRepairResults object, which is returned by the ICheckable.check() method:: d = filenode.check(repair=False) def _got_results(check_and_repair_results): check_results = check_and_repair_results.get_pre_repair_results() return filenode.repair(check_results) d.addCallback(_got_results) return d """ class IRepairResults(Interface): """I contain the results of a repair operation.""" def get_successful(self): """Returns a boolean: True if the repair made the file healthy, False if not. Repair failure generally indicates a file that has been damaged beyond repair.""" class IClient(Interface): def upload(uploadable): """Upload some data into a CHK, get back the UploadResults for it. @param uploadable: something that implements IUploadable @return: a Deferred that fires with the UploadResults instance. To get the URI for this file, use results.uri . """ def create_mutable_file(contents=""): """Create a new mutable file (with initial contents), get back the new node instance. @param contents: (bytestring, callable, or None): this provides the initial contents of the mutable file. If 'contents' is a bytestring, it will be used as-is. If 'contents' is a callable, it will be invoked with the new MutableFileNode instance and is expected to return a bytestring with the initial contents of the file (the callable can use node.get_writekey() to decide how to encrypt the initial contents, e.g. for a brand new dirnode with initial children). contents=None is equivalent to an empty string. Using content_maker= is more efficient than creating a mutable file and setting its contents in two separate operations.
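A sketch of the callable form (the function body is illustrative only)::

  def _initial_contents(node):
      # 'node' is the brand-new MutableFileNode; node.get_writekey() is
      # available here if the initial contents need to be encrypted
      return 'the first version of this file'
  d = client.create_mutable_file(_initial_contents)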
@return: a Deferred that fires with an IMutableFileNode instance. """ def create_dirnode(initial_children={}): """Create a new unattached dirnode, possibly with initial children. @param initial_children: dict with keys that are unicode child names, and values that are (childnode, metadata) tuples. @return: a Deferred that fires with the new IDirectoryNode instance. """ def create_node_from_uri(uri, rouri): """Create a new IFilesystemNode instance from the uri, synchronously. @param uri: a string or IURI-providing instance, or None. This could be for a LiteralFileNode, a CHK file node, a mutable file node, or a directory node @param rouri: a string or IURI-providing instance, or None. If the main uri is None, I will use the rouri instead. If I recognize the format of the main uri, I will ignore the rouri (because it can be derived from the writecap). @return: an instance that provides IFilesystemNode (or more usefully one of its subclasses). File-specifying URIs will result in IFileNode-providing instances, like ImmutableFileNode, LiteralFileNode, or MutableFileNode. Directory-specifying URIs will result in IDirectoryNode-providing instances, like DirectoryNode. """ class INodeMaker(Interface): """The NodeMaker is used to create IFilesystemNode instances. It can accept a filecap/dircap string and return the node right away. It can also create new nodes (i.e. upload a file, or create a mutable file) asynchronously. Once you have one of these nodes, you can use other methods to determine whether it is a file or directory, and to download or modify its contents. The NodeMaker encapsulates all the authorities that these IFilesystemNodes require (like references to the StorageFarmBroker). Each Tahoe process will typically have a single NodeMaker, but unit tests may create simplified/mocked forms for testing purposes. """ def create_from_cap(writecap, readcap=None, **kwargs): """I create an IFilesystemNode from the given writecap/readcap. I can only provide nodes for existing file/directory objects: use my other methods to create new objects. I return synchronously.""" def create_mutable_file(contents=None, keysize=None): """I create a new mutable file, and return a Deferred which will fire with the IMutableFileNode instance when it is ready. If contents= is provided (a bytestring), it will be used as the initial contents of the new file, otherwise the file will contain zero bytes. keysize= is for use by unit tests, to create mutable files that are smaller than usual.""" def create_new_mutable_directory(initial_children={}): """I create a new mutable directory, and return a Deferred which will fire with the IDirectoryNode instance when it is ready. If initial_children= is provided (a dict mapping unicode child name to (childnode, metadata_dict) tuples), the directory will be populated with those children, otherwise it will be empty.""" class IClientStatus(Interface): def list_all_uploads(): """Return a list of uploader objects, one for each upload which currently has an object available (tracked with weakrefs). This is intended for debugging purposes.""" def list_active_uploads(): """Return a list of active IUploadStatus objects.""" def list_recent_uploads(): """Return a list of IUploadStatus objects for the most recently started uploads.""" def list_all_downloads(): """Return a list of downloader objects, one for each download which currently has an object available (tracked with weakrefs). 
This is intended for debugging purposes.""" def list_active_downloads(): """Return a list of active IDownloadStatus objects.""" def list_recent_downloads(): """Return a list of IDownloadStatus objects for the most recently started downloads.""" class IUploadStatus(Interface): def get_started(): """Return a timestamp (float with seconds since epoch) indicating when the operation was started.""" def get_storage_index(): """Return a string with the (binary) storage index in use on this upload. Returns None if the storage index has not yet been calculated.""" def get_size(): """Return an integer with the number of bytes that will eventually be uploaded for this file. Returns None if the size is not yet known. """ def using_helper(): """Return True if this upload is using a Helper, False if not.""" def get_status(): """Return a string describing the current state of the upload process.""" def get_progress(): """Returns a tuple of floats, (chk, ciphertext, encode_and_push), each from 0.0 to 1.0 . 'chk' describes how much progress has been made towards hashing the file to determine a CHK encryption key: if non-convergent encryption is in use, this will be trivial, otherwise the whole file must be hashed. 'ciphertext' describes how much of the ciphertext has been pushed to the helper, and is '1.0' for non-helper uploads. 'encode_and_push' describes how much of the encode-and-push process has finished: for helper uploads this is dependent upon the helper providing progress reports. It might be reasonable to add all three numbers and report the sum to the user.""" def get_active(): """Return True if the upload is currently active, False if not.""" def get_results(): """Return an instance of UploadResults (which contains timing and sharemap information). Might return None if the upload is not yet finished.""" def get_counter(): """Each upload status gets a unique number: this method returns that number. This provides a handle to this particular upload, so a web page can generate a suitable hyperlink.""" class IDownloadStatus(Interface): def get_started(): """Return a timestamp (float with seconds since epoch) indicating when the operation was started.""" def get_storage_index(): """Return a string with the (binary) storage index in use on this download. This may be None if there is no storage index (i.e. LIT files).""" def get_size(): """Return an integer with the number of bytes that will eventually be retrieved for this file. Returns None if the size is not yet known. """ def using_helper(): """Return True if this download is using a Helper, False if not.""" def get_status(): """Return a string describing the current state of the download process.""" def get_progress(): """Returns a float (from 0.0 to 1.0) describing the amount of the download that has completed. This value will remain at 0.0 until the first byte of plaintext is pushed to the download target.""" def get_active(): """Return True if the download is currently active, False if not.""" def get_counter(): """Each download status gets a unique number: this method returns that number. 
This provides a handle to this particular download, so a web page can generate a suitable hyperlink.""" class IServermapUpdaterStatus(Interface): pass class IPublishStatus(Interface): pass class IRetrieveStatus(Interface): pass class NotCapableError(Exception): """You have tried to write to a read-only node.""" class BadWriteEnablerError(Exception): pass class RIControlClient(RemoteInterface): def wait_for_client_connections(num_clients=int): """Do not return until we have connections to at least NUM_CLIENTS storage servers. """ def upload_from_file_to_uri(filename=str, convergence=ChoiceOf(None, StringConstraint(2**20))): """Upload a file to the grid. This accepts a filename (which must be absolute) that points to a file on the node's local disk. The node will read the contents of this file, upload it to the grid, then return the URI at which it was uploaded. If convergence is None then a random encryption key will be used, else the plaintext will be hashed, then that hash will be mixed together with the "convergence" string to form the encryption key. """ return URI def download_from_uri_to_file(uri=URI, filename=str): """Download a file from the grid, placing it on the node's local disk at the given filename (which must be absolute[?]). Returns the absolute filename where the file was written.""" return str # debug stuff def get_memory_usage(): """Return a dict that describes the amount of memory currently in use. The keys are 'VmPeak', 'VmSize', and 'VmData'. The values are integers, measuring memory consumption in bytes.""" return DictOf(str, int) def speed_test(count=int, size=int, mutable=Any()): """Write 'count' tempfiles to disk, all of the given size. Measure how long (in seconds) it takes to upload them all to the servers. Then measure how long it takes to download all of them. If 'mutable' is 'create', time creation of mutable files. If 'mutable' is 'upload', then time access to the same mutable file instead of creating one. Returns a tuple of (upload_time, download_time). """ return (float, float) def measure_peer_response_time(): """Send a short message to each connected peer, and measure the time it takes for them to respond to it. This is a rough measure of the application-level round trip time. @return: a dictionary mapping peerid to a float (RTT time in seconds) """ return DictOf(str, float) UploadResults = Any() #DictOf(str, str) class RIEncryptedUploadable(RemoteInterface): __remote_name__ = "RIEncryptedUploadable.tahoe.allmydata.com" def get_size(): return Offset def get_all_encoding_parameters(): return (int, int, int, long) def read_encrypted(offset=Offset, length=ReadSize): return ListOf(str) def close(): return None class RICHKUploadHelper(RemoteInterface): __remote_name__ = "RIUploadHelper.tahoe.allmydata.com" def get_version(): """ Return a dictionary of version information. """ return DictOf(str, Any()) def upload(reader=RIEncryptedUploadable): return UploadResults class RIHelper(RemoteInterface): __remote_name__ = "RIHelper.tahoe.allmydata.com" def get_version(): """ Return a dictionary of version information. """ return DictOf(str, Any()) def upload_chk(si=StorageIndex): """See if a file with a given storage index needs uploading. The helper will ask the appropriate storage servers to see if the file has already been uploaded. If so, the helper will return a set of 'upload results' that includes whatever hashes are needed to build the read-cap, and perhaps a truncated sharemap. 
If the file has not yet been uploaded (or if it was only partially uploaded), the helper will return an empty upload-results dictionary and also an RICHKUploadHelper object that will take care of the upload process. The client should call upload() on this object and pass it a reference to an RIEncryptedUploadable object that will provide ciphertext. When the upload is finished, the upload() method will finish and return the upload results. """ return (UploadResults, ChoiceOf(RICHKUploadHelper, None)) class RIStatsProvider(RemoteInterface): __remote_name__ = "RIStatsProvider.tahoe.allmydata.com" """ Provides access to statistics and monitoring information. """ def get_stats(): """ returns a dictionary containing 'counters' and 'stats', each a dictionary with string counter/stat name keys, and numeric or None values. counters are monotonically increasing measures of work done, and stats are instantaneous measures (potentially time averaged internally) """ return DictOf(str, DictOf(str, ChoiceOf(float, int, long, None))) class RIStatsGatherer(RemoteInterface): __remote_name__ = "RIStatsGatherer.tahoe.allmydata.com" """ Provides a monitoring service for centralised collection of stats """ def provide(provider=RIStatsProvider, nickname=str): """ @param provider: a stats collector instance which should be polled periodically by the gatherer to collect stats. @param nickname: a name useful to identify the provided client """ return None class IStatsProducer(Interface): def get_stats(): """ returns a dictionary, with str keys representing the names of stats to be monitored, and numeric values. """ class RIKeyGenerator(RemoteInterface): __remote_name__ = "RIKeyGenerator.tahoe.allmydata.com" """ Provides a service offering to make RSA key pairs. """ def get_rsa_key_pair(key_size=int): """ @param key_size: the size of the signature key. @return: tuple(verifying_key, signing_key) """ return TupleOf(str, str) class FileTooLargeError(Exception): pass class IValidatedThingProxy(Interface): def start(): """ Acquire a thing and validate it. Return a deferred which is eventually fired with self if the thing is valid or errbacked if it can't be acquired or validated.""" class InsufficientVersionError(Exception): def __init__(self, needed, got): self.needed = needed self.got = got def __repr__(self): return "InsufficientVersionError(need '%s', got %s)" % (self.needed, self.got) class EmptyPathnameComponentError(Exception): """The webapi disallows empty pathname components."""
drewp/tahoe-lafs
src/allmydata/interfaces.py
Python
gpl-2.0
115,999
[ "Brian", "VisIt" ]
d28b908f7af5136c118e462d8777e3b719bd35237d481a7a40ab970f4183db03
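The check-then-repair flow documented in the allmydata interfaces above (ICheckable, ICheckAndRepairResults, IRepairable) can be illustrated with a short, self-contained sketch. The classes below are invented stand-ins, not the real Tahoe-LAFS node implementations, and the Twisted Deferred is replaced by a plain return value; only the call order from the IRepairable docstring is mirrored: check first, then pass the pre-repair results to repair().

class FakeCheckResults(object):
    """Stand-in for the ICheckResults object passed to repair()."""


class FakeCheckAndRepairResults(object):
    """Stand-in for ICheckAndRepairResults: holds the pre-repair results."""
    def get_pre_repair_results(self):
        return FakeCheckResults()


class FakeFileNode(object):
    """Stand-in for ICheckable/IRepairable, showing only the documented call order."""
    def check(self, repair=False):
        # A real node returns a Deferred; here the results are returned directly.
        return FakeCheckAndRepairResults()

    def repair(self, check_results):
        # A real node would use check_results to avoid servers that provided bad data.
        assert isinstance(check_results, FakeCheckResults)
        return "repair attempted using the pre-repair check results"


filenode = FakeFileNode()
check_and_repair_results = filenode.check(repair=False)
check_results = check_and_repair_results.get_pre_repair_results()
print(filenode.repair(check_results))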
# Copyright (C) Schrodinger, LLC. # All Rights Reserved # # For more information, see LICENSE in PyMOL's home directory. # # justhttpd.py # # vanilla web server designed for testing multi-origin applications # by serving up content on 127.0.0.1:xxxx instead of localhost:yyyy import BaseHTTPServer, cgi, urlparse, socket import types, os, sys, traceback, threading class _HTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler): def do_GET(self): self.process_request() def do_POST(self): self.process_request() def process_request(self): """ parse any URL or FORM arguments and process the request """ # verify that the request is coming from localhost try: host, port = self.client_address if host != '127.0.0.1': self.send_error(403, "Only localhost requests are allowed (not: %s)" % host) else: self.callback = None self.parse_args() self.send_doc() except socket.error: pass def parse_args(self): if (self.command == "POST"): self.fs = cgi.FieldStorage(fp=self.rfile, headers=self.headers, environ = {'REQUEST_METHOD':'POST'}, keep_blank_values = 1) self.urlpath = self.path elif (self.command == "GET"): scheme,netloc,path,params,qs,fragment = urlparse.urlparse(self.path) self.fs = cgi.FieldStorage(environ = {'REQUEST_METHOD':'GET', 'QUERY_STRING':qs}, keep_blank_values = 1) self.urlpath = path else: self.fs = None def send_doc(self): """ send a document (file) in the current directory or any sub-directory """ path_list = self.path.split('/')[1:] if '..' in path_list: # prevent access to parent directories self.send_error(404,"Illegal path.") self.wfile.write(": %s" % self.path) elif self.server.root == None: self.send_error(404,"No content root specified.") else: try: full_path = os.path.join(*[self.server.root] + list(path_list)) print full_path if os.path.isdir(full_path): full_path = full_path + "/index.html" fp = open(full_path,"rb") self.send_ok(self.guess_mime(full_path)) self.wfile.write(fp.read()) fp.close() except: self.send_error(404,"Unable to locate document.") self.wfile.write(": %s" % self.path) self.wfile.write(str(sys.exc_info())) # exc_info() is thread safe # self.wfile.write(sys.exc_value) # exc_value is not thread safe def guess_mime(self,path): """ guess the mime type based on the file extension """ if path.endswith('.html'): return 'text/html' elif path.endswith('.js'): return 'application/x-javascript' elif path.endswith('.jpg'): return 'image/jpeg' elif path.endswith('.png'): return 'image/png' elif path.endswith('.gif'): return 'image/gif' elif path.endswith('.sdf'): return 'chemical/x-mdl-sdfile' elif path.endswith('.mol'): return 'chemical/x-mdl-molfile' elif path.endswith('.pwg'): return 'application/x-pymol' else: return 'text/plain' def send_error(self,errcode,errmsg): try: self.send_response(errcode) self.send_header('Content-type', 'text/plain') self.end_headers() self.wfile.write("HTTPd-Error: "+errmsg+"\n") except: # right now we're swallowing any/all exceptions # (e.g. 
Broken Pipe) pass def send_ok(self, mime='text/html'): self.send_response(200) self.send_header('Content-type', mime) self.send_header('Pragma','no-cache') self.send_header('Cache-Control','no-cache, must-revalidate') self.send_header('Expires','Sat, 10 Jan 2008 01:00:00 GMT') self.end_headers() def echo_args(self): """ for debugging requests """ self.wfile.write("%s\n" % self.command) if (self.fs): for k in self.fs.keys(): self.wfile.write("%s = " % k) # key can have multiple values, as with checkboxes, # but also arbitrarily if (isinstance(self.fs[k], types.ListType)): self.wfile.write("%s\n" % self.fs.getlist(k)) else: # key can be uploaded file if (self.fs[k].filename): self.wfile.write("%s\n" % self.fs[k].filename) fp = self.fs[k].file #self.wfile.write("FILE %s" % cgi.escape(repr(fp))) #self.wfile.write("%s\n" % fp.name) # fails for StringIO instances self.wfile.write("%s\n" % repr(fp)) # two ways to get file contents #file_contents = self.fs.getvalue(k) #file_contents = fp.read() #self.wfile.write("%s" % file_contents) else: #plain-old key/value self.wfile.write("%s\n" % self.fs.getvalue(k)) else: self.wfile.write("No args\n") class PlainHttpd: def __init__(self, port=0, root=None): self.port = int(port) self.stop_event = threading.Event() self.stop_event.set() self.root = root self.server = BaseHTTPServer.HTTPServer(('', self.port), _HTTPRequestHandler) if self.port == 0: self.port = self.server.socket.getsockname()[1] self.server.root = self.root def _server_thread(self): while not self.stop_event.isSet(): self.server.handle_request() def start(self): # spawn thread print " HTTPd: serving requests on http://127.0.0.1:%d" % self.port t = threading.Thread(target=self._server_thread) t.setDaemon(1) self.stop_event.clear() t.start() def stop(self): if not self.stop_event.isSet(): self.stop_event.set() try: # create a request in order to release the handler import urllib urllib.urlopen("http://localhost:%d" % self.port) except: pass self.server.socket.close() def main(): import os # initialize the server, with current local working directory as root server = PlainHttpd(0, ".") # get a dynamically assigned port number port = server.port # start handling requests server.start() # now launch a browser pointing at our server import webbrowser webbrowser.open("http://127.0.0.1:%d"%port) if __name__ in [ '__main__', 'pymol' ]: # intended to be launched with normal Python or # pymol -qc justhttpd.py main() import time while 1: time.sleep(1)
gratefulfrog/lib
python/web/examples/justhttpd.py
Python
gpl-2.0
7,664
[ "PyMOL" ]
8fc4e6180363c456b2499a756225477d23406e2699aa581073a7e3aa97d5f6f6
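One possible way to drive the PlainHttpd class from justhttpd.py above is sketched below. It is Python 2, matching the module's own use of BaseHTTPServer and urllib; the import name "justhttpd" and the served directory are assumptions. Passing port 0 asks the server to pick a free port, exactly as the module's main() does.

import urllib          # Python 2, like the module above
import justhttpd       # assumes justhttpd.py is on the import path

server = justhttpd.PlainHttpd(0, ".")   # port 0 -> dynamically assigned, serve the cwd
server.start()
print("serving on http://127.0.0.1:%d" % server.port)

# Fetch something from the loopback-only server (index.html or a 404 page).
response = urllib.urlopen("http://127.0.0.1:%d/" % server.port).read()
print(response[:200])

server.stop()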
# ============================================================================ # # Copyright (C) 2007-2012 Conceptive Engineering bvba. All rights reserved. # www.conceptive.be / project-camelot@conceptive.be # # This file is part of the Camelot Library. # # This file may be used under the terms of the GNU General Public # License version 2.0 as published by the Free Software Foundation # and appearing in the file license.txt included in the packaging of # this file. Please review this information to ensure GNU # General Public Licensing requirements will be met. # # If you are unsure which license is appropriate for your use, please # visit www.python-camelot.com or contact project-camelot@conceptive.be # # This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE # WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. # # For use of this library in commercial applications, please contact # project-camelot@conceptive.be # # ============================================================================ from customdelegate import CustomDelegate, DocumentationMetaclass, ValueLoading from camelot.view.controls import editors from camelot.core.utils import variant_to_pyobject from PyQt4 import QtCore from PyQt4.QtCore import Qt class DateTimeDelegate(CustomDelegate): __metaclass__ = DocumentationMetaclass editor = editors.DateTimeEditor def __init__(self, parent=None, editable=True, **kwargs): CustomDelegate.__init__(self, parent, editable=editable, **kwargs) locale = QtCore.QLocale() self.datetime_format = locale.dateTimeFormat(locale.ShortFormat) def paint(self, painter, option, index): painter.save() self.drawBackground(painter, option, index) value = variant_to_pyobject( index.model().data( index, Qt.EditRole ) ) value_str = u'' if value not in (None, ValueLoading): date_time = QtCore.QDateTime( value.year, value.month, value.day, value.hour, value.minute, value.second ) value_str = date_time.toString(self.datetime_format) self.paint_text(painter, option, index, value_str, horizontal_align=Qt.AlignRight) painter.restore()
jeroendierckx/Camelot
camelot/view/controls/delegates/datetimedelegate.py
Python
gpl-2.0
2,388
[ "VisIt" ]
844251d0755deb796cd8b44e75fcba8a73463bf42fb0966c319870adf189860c
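The formatting performed in DateTimeDelegate.paint() above can be shown in isolation: a Python datetime is copied into a QDateTime and rendered with the locale's short date/time format. The sketch below needs PyQt4 (as the original module does) but nothing from Camelot; the sample datetime value is arbitrary.

import datetime
from PyQt4 import QtCore

locale = QtCore.QLocale()
datetime_format = locale.dateTimeFormat(locale.ShortFormat)

value = datetime.datetime(2012, 3, 14, 15, 9, 26)    # arbitrary sample value
date_time = QtCore.QDateTime(value.year, value.month, value.day,
                             value.hour, value.minute, value.second)
print(date_time.toString(datetime_format))   # e.g. '14/03/12 15:09', depending on the locale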
# encoding: utf-8 """ The Base Application class for IPython.parallel apps Authors: * Brian Granger * Min RK """ #----------------------------------------------------------------------------- # Copyright (C) 2008-2011 The IPython Development Team # # Distributed under the terms of the BSD License. The full license is in # the file COPYING, distributed as part of this software. #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- import os import logging import re import sys from subprocess import Popen, PIPE from IPython.config.application import catch_config_error, LevelFormatter from IPython.core import release from IPython.core.crashhandler import CrashHandler from IPython.core.application import ( BaseIPythonApplication, base_aliases as base_ip_aliases, base_flags as base_ip_flags ) from IPython.utils.path import expand_path from IPython.utils import py3compat from IPython.utils.py3compat import unicode_type from IPython.utils.traitlets import Unicode, Bool, Instance, Dict #----------------------------------------------------------------------------- # Module errors #----------------------------------------------------------------------------- class PIDFileError(Exception): pass #----------------------------------------------------------------------------- # Crash handler for this application #----------------------------------------------------------------------------- class ParallelCrashHandler(CrashHandler): """sys.excepthook for IPython itself, leaves a detailed report on disk.""" def __init__(self, app): contact_name = release.authors['Min'][0] contact_email = release.author_email bug_tracker = 'https://github.com/ipython/ipython/issues' super(ParallelCrashHandler,self).__init__( app, contact_name, contact_email, bug_tracker ) #----------------------------------------------------------------------------- # Main application #----------------------------------------------------------------------------- base_aliases = {} base_aliases.update(base_ip_aliases) base_aliases.update({ 'work-dir' : 'BaseParallelApplication.work_dir', 'log-to-file' : 'BaseParallelApplication.log_to_file', 'clean-logs' : 'BaseParallelApplication.clean_logs', 'log-url' : 'BaseParallelApplication.log_url', 'cluster-id' : 'BaseParallelApplication.cluster_id', }) base_flags = { 'log-to-file' : ( {'BaseParallelApplication' : {'log_to_file' : True}}, "send log output to a file" ) } base_flags.update(base_ip_flags) class BaseParallelApplication(BaseIPythonApplication): """The base Application for IPython.parallel apps Principle extensions to BaseIPyythonApplication: * work_dir * remote logging via pyzmq * IOLoop instance """ crash_handler_class = ParallelCrashHandler def _log_level_default(self): # temporarily override default_log_level to INFO return logging.INFO def _log_format_default(self): """override default log format to include time""" return u"%(asctime)s.%(msecs).03d [%(name)s]%(highlevel)s %(message)s" work_dir = Unicode(py3compat.getcwd(), config=True, help='Set the working dir for the process.' 
) def _work_dir_changed(self, name, old, new): self.work_dir = unicode_type(expand_path(new)) log_to_file = Bool(config=True, help="whether to log to a file") clean_logs = Bool(False, config=True, help="whether to cleanup old logfiles before starting") log_url = Unicode('', config=True, help="The ZMQ URL of the iplogger to aggregate logging.") cluster_id = Unicode('', config=True, help="""String id to add to runtime files, to prevent name collisions when using multiple clusters with a single profile simultaneously. When set, files will be named like: 'ipcontroller-<cluster_id>-engine.json' Since this is text inserted into filenames, typical recommendations apply: Simple character strings are ideal, and spaces are not recommended (but should generally work). """ ) def _cluster_id_changed(self, name, old, new): self.name = self.__class__.name if new: self.name += '-%s'%new def _config_files_default(self): return ['ipcontroller_config.py', 'ipengine_config.py', 'ipcluster_config.py'] loop = Instance('zmq.eventloop.ioloop.IOLoop') def _loop_default(self): from zmq.eventloop.ioloop import IOLoop return IOLoop.instance() aliases = Dict(base_aliases) flags = Dict(base_flags) @catch_config_error def initialize(self, argv=None): """initialize the app""" super(BaseParallelApplication, self).initialize(argv) self.to_work_dir() self.reinit_logging() def to_work_dir(self): wd = self.work_dir if unicode_type(wd) != py3compat.getcwd(): os.chdir(wd) self.log.info("Changing to working dir: %s" % wd) # This is the working dir by now. sys.path.insert(0, '') def reinit_logging(self): # Remove old log files log_dir = self.profile_dir.log_dir if self.clean_logs: for f in os.listdir(log_dir): if re.match(r'%s-\d+\.(log|err|out)' % self.name, f): try: os.remove(os.path.join(log_dir, f)) except (OSError, IOError): # probably just conflict from sibling process # already removing it pass if self.log_to_file: # Start logging to the new log file log_filename = self.name + u'-' + str(os.getpid()) + u'.log' logfile = os.path.join(log_dir, log_filename) open_log_file = open(logfile, 'w') else: open_log_file = None if open_log_file is not None: while self.log.handlers: self.log.removeHandler(self.log.handlers[0]) self._log_handler = logging.StreamHandler(open_log_file) self.log.addHandler(self._log_handler) else: self._log_handler = self.log.handlers[0] # Add timestamps to log format: self._log_formatter = LevelFormatter(self.log_format, datefmt=self.log_datefmt) self._log_handler.setFormatter(self._log_formatter) # do not propagate log messages to root logger # ipcluster app will sometimes print duplicate messages during shutdown # if this is 1 (default): self.log.propagate = False def write_pid_file(self, overwrite=False): """Create a .pid file in the pid_dir with my pid. This must be called after pre_construct, which sets `self.pid_dir`. This raises :exc:`PIDFileError` if the pid file exists already. """ pid_file = os.path.join(self.profile_dir.pid_dir, self.name + u'.pid') if os.path.isfile(pid_file): pid = self.get_pid_from_file() if not overwrite: raise PIDFileError( 'The pid file [%s] already exists. \nThis could mean that this ' 'server is already running with [pid=%s].' % (pid_file, pid) ) with open(pid_file, 'w') as f: self.log.info("Creating pid file: %s" % pid_file) f.write(repr(os.getpid())+'\n') def remove_pid_file(self): """Remove the pid file. This should be called at shutdown by registering a callback with :func:`reactor.addSystemEventTrigger`. This needs to return ``None``. 
""" pid_file = os.path.join(self.profile_dir.pid_dir, self.name + u'.pid') if os.path.isfile(pid_file): try: self.log.info("Removing pid file: %s" % pid_file) os.remove(pid_file) except: self.log.warn("Error removing the pid file: %s" % pid_file) def get_pid_from_file(self): """Get the pid from the pid file. If the pid file doesn't exist a :exc:`PIDFileError` is raised. """ pid_file = os.path.join(self.profile_dir.pid_dir, self.name + u'.pid') if os.path.isfile(pid_file): with open(pid_file, 'r') as f: s = f.read().strip() try: pid = int(s) except: raise PIDFileError("invalid pid file: %s (contents: %r)"%(pid_file, s)) return pid else: raise PIDFileError('pid file not found: %s' % pid_file) def check_pid(self, pid): if os.name == 'nt': try: import ctypes # returns 0 if no such process (of ours) exists # positive int otherwise p = ctypes.windll.kernel32.OpenProcess(1,0,pid) except Exception: self.log.warn( "Could not determine whether pid %i is running via `OpenProcess`. " " Making the likely assumption that it is."%pid ) return True return bool(p) else: try: p = Popen(['ps','x'], stdout=PIPE, stderr=PIPE) output,_ = p.communicate() except OSError: self.log.warn( "Could not determine whether pid %i is running via `ps x`. " " Making the likely assumption that it is."%pid ) return True pids = list(map(int, re.findall(br'^\W*\d+', output, re.MULTILINE))) return pid in pids
WillisXChen/django-oscar
oscar/lib/python2.7/site-packages/IPython/parallel/apps/baseapp.py
Python
bsd-3-clause
10,064
[ "Brian" ]
78f3b99b83ad61b83e08ba894773beb4ed9260f92f18b80af29fff8ae850ea72
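The pid-file handling in BaseParallelApplication above (write_pid_file / get_pid_from_file) reduces to a small standalone pattern. The sketch below strips out the IPython application machinery; the file path is an arbitrary example and error handling is kept to a bare minimum.

import os

def write_pid_file(pid_file, overwrite=False):
    # Refuse to clobber an existing pid file unless explicitly asked to.
    if os.path.isfile(pid_file) and not overwrite:
        raise RuntimeError("pid file %s already exists" % pid_file)
    with open(pid_file, 'w') as f:
        f.write(repr(os.getpid()) + '\n')

def get_pid_from_file(pid_file):
    if not os.path.isfile(pid_file):
        raise RuntimeError("pid file not found: %s" % pid_file)
    with open(pid_file, 'r') as f:
        return int(f.read().strip())

pid_file = os.path.join('/tmp', 'example-app.pid')   # arbitrary location
write_pid_file(pid_file, overwrite=True)
print(get_pid_from_file(pid_file))   # prints this process's pid
os.remove(pid_file)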
import CogHQLoader import LawbotHQBossBattle import LawbotHQExterior import LawbotOfficeExterior import StageInterior from direct.directnotify import DirectNotifyGlobal from direct.fsm import State from direct.fsm import StateData from direct.gui import DirectGui from toontown.toon import Toon from toontown.toonbase import TTLocalizer from toontown.toonbase import ToontownGlobals aspectSF = 0.7227 class LawbotCogHQLoader(CogHQLoader.CogHQLoader): notify = DirectNotifyGlobal.directNotify.newCategory('LawbotCogHQLoader') def __init__(self, hood, parentFSMState, doneEvent): CogHQLoader.CogHQLoader.__init__(self, hood, parentFSMState, doneEvent) self.fsm.addState(State.State('stageInterior', self.enterStageInterior, self.exitStageInterior, ['quietZone', 'cogHQExterior'])) self.fsm.addState(State.State('factoryExterior', self.enterFactoryExterior, self.exitFactoryExterior, ['quietZone', 'cogHQExterior'])) for stateName in ['start', 'cogHQExterior', 'quietZone']: state = self.fsm.getStateNamed(stateName) state.addTransition('stageInterior') for stateName in ['quietZone']: state = self.fsm.getStateNamed(stateName) state.addTransition('factoryExterior') self.musicFile = 'phase_11/audio/bgm/LB_courtyard.ogg' self.cogHQExteriorModelPath = 'phase_11/models/lawbotHQ/LawbotPlaza' self.factoryExteriorModelPath = 'phase_11/models/lawbotHQ/LB_DA_Lobby' self.cogHQLobbyModelPath = 'phase_11/models/lawbotHQ/LB_CH_Lobby' self.geom = None def load(self, zoneId): CogHQLoader.CogHQLoader.load(self, zoneId) Toon.loadSellbotHQAnims() def unloadPlaceGeom(self): if self.geom: self.geom.removeNode() self.geom = None CogHQLoader.CogHQLoader.unloadPlaceGeom(self) def loadPlaceGeom(self, zoneId): self.notify.info('loadPlaceGeom: %s' % zoneId) zoneId = zoneId - zoneId % 100 self.notify.debug('zoneId = %d ToontownGlobals.LawbotHQ=%d' % (zoneId, ToontownGlobals.LawbotHQ)) if zoneId == ToontownGlobals.LawbotHQ: self.geom = loader.loadModel(self.cogHQExteriorModelPath) ug = self.geom.find('**/underground') ug.setBin('ground', -10) brLinkTunnel = self.geom.find('**/TunnelEntrance1') brLinkTunnel.setName('linktunnel_br_3326_DNARoot') elif zoneId == ToontownGlobals.LawbotOfficeExt: self.geom = loader.loadModel(self.factoryExteriorModelPath) ug = self.geom.find('**/underground') ug.setBin('ground', -10) self.geom.flattenMedium() elif zoneId == ToontownGlobals.LawbotLobby: if base.config.GetBool('want-qa-regression', 0): self.notify.info('QA-REGRESSION: COGHQ: Visit LawbotLobby') self.notify.debug('cogHQLobbyModelPath = %s' % self.cogHQLobbyModelPath) self.geom = loader.loadModel(self.cogHQLobbyModelPath) ug = self.geom.find('**/underground') ug.setBin('ground', -10) else: self.notify.warning('loadPlaceGeom: unclassified zone %s' % zoneId) CogHQLoader.CogHQLoader.loadPlaceGeom(self, zoneId) def unload(self): CogHQLoader.CogHQLoader.unload(self) Toon.unloadSellbotHQAnims() def enterStageInterior(self, requestStatus): self.placeClass = StageInterior.StageInterior self.stageId = requestStatus['stageId'] self.enterPlace(requestStatus) def exitStageInterior(self): self.exitPlace() self.placeClass = None return def getExteriorPlaceClass(self): self.notify.debug('getExteriorPlaceClass') return LawbotHQExterior.LawbotHQExterior def getBossPlaceClass(self): self.notify.debug('getBossPlaceClass') return LawbotHQBossBattle.LawbotHQBossBattle def enterFactoryExterior(self, requestStatus): self.placeClass = LawbotOfficeExterior.LawbotOfficeExterior self.enterPlace(requestStatus) self.hood.spawnTitleText(requestStatus['zoneId']) def 
exitFactoryExterior(self): taskMgr.remove('titleText') self.hood.hideTitleText() self.exitPlace() self.placeClass = None return def enterCogHQBossBattle(self, requestStatus): self.notify.debug('LawbotCogHQLoader.enterCogHQBossBattle') CogHQLoader.CogHQLoader.enterCogHQBossBattle(self, requestStatus) base.cr.forbidCheesyEffects(1) def exitCogHQBossBattle(self): self.notify.debug('LawbotCogHQLoader.exitCogHQBossBattle') CogHQLoader.CogHQLoader.exitCogHQBossBattle(self) base.cr.forbidCheesyEffects(0)
ToontownUprising/src
toontown/coghq/LawbotCogHQLoader.py
Python
mit
4,773
[ "VisIt" ]
20d051c67cb9a6ab7fdf80b69ef762e4cdec7b3cb70103f813edac89ef6b3a04
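One detail worth calling out in loadPlaceGeom() above is the line zoneId = zoneId - zoneId % 100: the loader rounds the zone id down to its "hundreds" boundary before comparing it against the ToontownGlobals constants, so every sub-zone shares its parent zone's geometry. The zone ids below are made-up values used only to show the rounding.

def normalize_zone(zone_id):
    # Same rounding as loadPlaceGeom(): drop the last two digits.
    return zone_id - zone_id % 100

for zone_id in (13200, 13232, 13299):     # hypothetical sub-zones of the same parent zone
    print("%d -> %d" % (zone_id, normalize_zone(zone_id)))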
""" Bok choy acceptance and a11y tests for problem types in the LMS See also lettuce tests in lms/djangoapps/courseware/features/problems.feature """ import random import textwrap from nose import SkipTest from abc import ABCMeta, abstractmethod from nose.plugins.attrib import attr from selenium.webdriver import ActionChains from capa.tests.response_xml_factory import ( AnnotationResponseXMLFactory, ChoiceResponseXMLFactory, ChoiceTextResponseXMLFactory, CodeResponseXMLFactory, CustomResponseXMLFactory, FormulaResponseXMLFactory, ImageResponseXMLFactory, MultipleChoiceResponseXMLFactory, NumericalResponseXMLFactory, OptionResponseXMLFactory, StringResponseXMLFactory, SymbolicResponseXMLFactory, ) from common.test.acceptance.fixtures.course import XBlockFixtureDesc from common.test.acceptance.pages.lms.problem import ProblemPage from common.test.acceptance.tests.helpers import select_option_by_text from common.test.acceptance.tests.lms.test_lms_problems import ProblemsTest from common.test.acceptance.tests.helpers import EventsTestMixin class ProblemTypeTestBaseMeta(ABCMeta): """ MetaClass for ProblemTypeTestBase to ensure that the required attributes are defined in the inheriting classes. """ def __call__(cls, *args, **kwargs): obj = type.__call__(cls, *args, **kwargs) required_attrs = [ 'problem_name', 'problem_type', 'factory', 'factory_kwargs', 'status_indicators', ] for required_attr in required_attrs: msg = ('{} is a required attribute for {}').format( required_attr, str(cls) ) try: if obj.__getattribute__(required_attr) is None: raise NotImplementedError(msg) except AttributeError: raise NotImplementedError(msg) return obj class ProblemTypeTestBase(ProblemsTest, EventsTestMixin): """ Base class for testing assesment problem types in bok choy. This inherits from ProblemsTest, which has capabilities for testing problem features that are not problem type specific (checking, hinting, etc.). The following attributes must be explicitly defined when inheriting from this class: problem_name (str) problem_type (str) factory (ResponseXMLFactory subclass instance) Additionally, the default values for factory_kwargs and status_indicators may need to be overridden for some problem types. """ __metaclass__ = ProblemTypeTestBaseMeta problem_name = None problem_type = None factory = None factory_kwargs = {} status_indicators = { 'correct': ['span.correct'], 'incorrect': ['span.incorrect'], 'unanswered': ['span.unanswered'], } def setUp(self): """ Visits courseware_page and defines self.problem_page. """ super(ProblemTypeTestBase, self).setUp() self.courseware_page.visit() self.problem_page = ProblemPage(self.browser) def get_problem(self): """ Creates a {problem_type} problem """ # Generate the problem XML using capa.tests.response_xml_factory return XBlockFixtureDesc( 'problem', self.problem_name, data=self.factory.build_xml(**self.factory_kwargs), metadata={'rerandomize': 'always'} ) def wait_for_status(self, status): """ Waits for the expected status indicator. Args: status: one of ("correct", "incorrect", "unanswered) """ msg = "Wait for status to be {}".format(status) selector = ', '.join(self.status_indicators[status]) self.problem_page.wait_for_element_visibility(selector, msg) @abstractmethod def answer_problem(self, correct): """ Args: `correct` (bool): Inputs correct answer if True, else inputs incorrect answer. """ raise NotImplementedError() class ProblemTypeTestMixin(object): """ Test cases shared amongst problem types. 
""" can_submit_blank = False @attr(shard=7) def test_answer_correctly(self): """ Scenario: I can answer a problem correctly Given External graders respond "correct" And I am viewing a "<ProblemType>" problem When I answer a "<ProblemType>" problem "correctly" Then my "<ProblemType>" answer is marked "correct" And The "<ProblemType>" problem displays a "correct" answer And a "problem_check" server event is emitted And a "problem_check" browser event is emitted """ # Make sure we're looking at the right problem self.assertEqual(self.problem_page.problem_name, self.problem_name) # Answer the problem correctly self.answer_problem(correct=True) self.problem_page.click_check() self.wait_for_status('correct') # Check for corresponding tracking event expected_events = [ { 'event_source': 'server', 'event_type': 'problem_check', 'username': self.username, }, { 'event_source': 'browser', 'event_type': 'problem_check', 'username': self.username, }, ] for event in expected_events: self.wait_for_events(event_filter=event, number_of_matches=1) @attr(shard=7) def test_answer_incorrectly(self): """ Scenario: I can answer a problem incorrectly Given External graders respond "incorrect" And I am viewing a "<ProblemType>" problem When I answer a "<ProblemType>" problem "incorrectly" Then my "<ProblemType>" answer is marked "incorrect" And The "<ProblemType>" problem displays a "incorrect" answer """ self.problem_page.wait_for( lambda: self.problem_page.problem_name == self.problem_name, "Make sure the correct problem is on the page" ) # Answer the problem incorrectly self.answer_problem(correct=False) self.problem_page.click_check() self.wait_for_status('incorrect') @attr(shard=7) def test_submit_blank_answer(self): """ Scenario: I can submit a blank answer Given I am viewing a "<ProblemType>" problem When I check a problem Then my "<ProblemType>" answer is marked "incorrect" And The "<ProblemType>" problem displays a "blank" answer """ if not self.can_submit_blank: raise SkipTest("Test incompatible with the current problem type") self.problem_page.wait_for( lambda: self.problem_page.problem_name == self.problem_name, "Make sure the correct problem is on the page" ) # Leave the problem unchanged and click check. self.assertNotIn('is-disabled', self.problem_page.q(css='div.problem button.check').attrs('class')[0]) self.problem_page.click_check() self.wait_for_status('incorrect') @attr(shard=7) def test_cant_submit_blank_answer(self): """ Scenario: I can't submit a blank answer When I try to submit blank answer Then I can't check a problem """ if self.can_submit_blank: raise SkipTest("Test incompatible with the current problem type") self.problem_page.wait_for( lambda: self.problem_page.problem_name == self.problem_name, "Make sure the correct problem is on the page" ) self.assertIn('is-disabled', self.problem_page.q(css='div.problem button.check').attrs('class')[0]) @attr('a11y') def test_problem_type_a11y(self): """ Run accessibility audit for the problem type. 
""" self.problem_page.wait_for( lambda: self.problem_page.problem_name == self.problem_name, "Make sure the correct problem is on the page" ) # Set the scope to the problem container self.problem_page.a11y_audit.config.set_scope( include=['div#seq_content']) self.problem_page.a11y_audit.config.set_rules({ "ignore": [ 'aria-allowed-attr', # TODO: AC-491 'aria-valid-attr', # TODO: AC-491 'aria-roles', # TODO: AC-491 'checkboxgroup', # TODO: AC-491 'radiogroup', # TODO: AC-491 'section', # TODO: AC-491 'label', # TODO: AC-491 ] }) # Run the accessibility audit. self.problem_page.a11y_audit.check_for_accessibility_errors() class AnnotationProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin): """ TestCase Class for Annotation Problem Type """ problem_name = 'ANNOTATION TEST PROBLEM' problem_type = 'annotationresponse' factory = AnnotationResponseXMLFactory() can_submit_blank = True factory_kwargs = { 'title': 'Annotation Problem', 'text': 'The text being annotated', 'comment': 'What do you think the about this text?', 'comment_prompt': 'Type your answer below.', 'tag_prompt': 'Which of these items most applies to the text?', 'options': [ ('dog', 'correct'), ('cat', 'incorrect'), ('fish', 'partially-correct'), ] } status_indicators = { 'correct': ['span.correct'], 'incorrect': ['span.incorrect'], 'partially-correct': ['span.partially-correct'], 'unanswered': ['span.unanswered'], } def setUp(self, *args, **kwargs): """ Additional setup for AnnotationProblemTypeTest """ super(AnnotationProblemTypeTest, self).setUp(*args, **kwargs) def answer_problem(self, correct): """ Answer annotation problem. """ choice = 0 if correct else 1 answer = 'Student comment' self.problem_page.q(css='div.problem textarea.comment').fill(answer) self.problem_page.q( css='div.problem span.tag'.format(choice=choice) ).nth(choice).click() class CheckboxProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin): """ TestCase Class for Checkbox Problem Type """ problem_name = 'CHECKBOX TEST PROBLEM' problem_type = 'checkbox' factory = ChoiceResponseXMLFactory() factory_kwargs = { 'question_text': 'The correct answer is Choice 0 and Choice 2', 'choice_type': 'checkbox', 'choices': [True, False, True, False], 'choice_names': ['Choice 0', 'Choice 1', 'Choice 2', 'Choice 3'] } def setUp(self, *args, **kwargs): """ Additional setup for CheckboxProblemTypeTest """ super(CheckboxProblemTypeTest, self).setUp(*args, **kwargs) self.problem_page.a11y_audit.config.set_rules({ 'ignore': [ 'section', # TODO: AC-491 'aria-allowed-attr', # TODO: AC-251 'aria-valid-attr', # TODO: AC-251 'aria-roles', # TODO: AC-251 'checkboxgroup', # TODO: AC-251 ] }) def answer_problem(self, correct): """ Answer checkbox problem. 
""" if correct: self.problem_page.click_choice("choice_0") self.problem_page.click_choice("choice_2") else: self.problem_page.click_choice("choice_1") class MultipleChoiceProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin): """ TestCase Class for Multiple Choice Problem Type """ problem_name = 'MULTIPLE CHOICE TEST PROBLEM' problem_type = 'multiple choice' factory = MultipleChoiceResponseXMLFactory() factory_kwargs = { 'question_text': 'The correct answer is Choice 2', 'choices': [False, False, True, False], 'choice_names': ['choice_0', 'choice_1', 'choice_2', 'choice_3'], } status_indicators = { 'correct': ['label.choicegroup_correct'], 'incorrect': ['label.choicegroup_incorrect', 'span.incorrect'], 'unanswered': ['span.unanswered'], } def setUp(self, *args, **kwargs): """ Additional setup for MultipleChoiceProblemTypeTest """ super(MultipleChoiceProblemTypeTest, self).setUp(*args, **kwargs) self.problem_page.a11y_audit.config.set_rules({ 'ignore': [ 'section', # TODO: AC-491 'aria-valid-attr', # TODO: AC-251 'radiogroup', # TODO: AC-251 ] }) def answer_problem(self, correct): """ Answer multiple choice problem. """ if correct: self.problem_page.click_choice("choice_choice_2") else: self.problem_page.click_choice("choice_choice_1") class RadioProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin): """ TestCase Class for Radio Problem Type """ problem_name = 'RADIO TEST PROBLEM' problem_type = 'radio' factory = ChoiceResponseXMLFactory() factory_kwargs = { 'question_text': 'The correct answer is Choice 2', 'choice_type': 'radio', 'choices': [False, False, True, False], 'choice_names': ['Choice 0', 'Choice 1', 'Choice 2', 'Choice 3'], } status_indicators = { 'correct': ['label.choicegroup_correct'], 'incorrect': ['label.choicegroup_incorrect', 'span.incorrect'], 'unanswered': ['span.unanswered'], } def setUp(self, *args, **kwargs): """ Additional setup for RadioProblemTypeTest """ super(RadioProblemTypeTest, self).setUp(*args, **kwargs) self.problem_page.a11y_audit.config.set_rules({ 'ignore': [ 'section', # TODO: AC-491 'aria-valid-attr', # TODO: AC-292 'radiogroup', # TODO: AC-292 ] }) def answer_problem(self, correct): """ Answer radio problem. """ if correct: self.problem_page.click_choice("choice_2") else: self.problem_page.click_choice("choice_1") class DropDownProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin): """ TestCase Class for Drop Down Problem Type """ problem_name = 'DROP DOWN TEST PROBLEM' problem_type = 'drop down' factory = OptionResponseXMLFactory() factory_kwargs = { 'question_text': 'The correct answer is Option 2', 'options': ['Option 1', 'Option 2', 'Option 3', 'Option 4'], 'correct_option': 'Option 2' } def setUp(self, *args, **kwargs): """ Additional setup for DropDownProblemTypeTest """ super(DropDownProblemTypeTest, self).setUp(*args, **kwargs) self.problem_page.a11y_audit.config.set_rules({ 'ignore': [ 'section', # TODO: AC-491 'label', # TODO: AC-291 ] }) def answer_problem(self, correct): """ Answer drop down problem. 
""" answer = 'Option 2' if correct else 'Option 3' selector_element = self.problem_page.q( css='.problem .option-input select') select_option_by_text(selector_element, answer) class StringProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin): """ TestCase Class for String Problem Type """ problem_name = 'STRING TEST PROBLEM' problem_type = 'string' factory = StringResponseXMLFactory() factory_kwargs = { 'question_text': 'The answer is "correct string"', 'case_sensitive': False, 'answer': 'correct string', } status_indicators = { 'correct': ['div.correct'], 'incorrect': ['div.incorrect'], 'unanswered': ['div.unanswered', 'div.unsubmitted'], } def setUp(self, *args, **kwargs): """ Additional setup for StringProblemTypeTest """ super(StringProblemTypeTest, self).setUp(*args, **kwargs) self.problem_page.a11y_audit.config.set_rules({ 'ignore': [ 'section', # TODO: AC-491 'label', # TODO: AC-290 ] }) def answer_problem(self, correct): """ Answer string problem. """ textvalue = 'correct string' if correct else 'incorrect string' self.problem_page.fill_answer(textvalue) class NumericalProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin): """ TestCase Class for Numerical Problem Type """ problem_name = 'NUMERICAL TEST PROBLEM' problem_type = 'numerical' factory = NumericalResponseXMLFactory() factory_kwargs = { 'question_text': 'The answer is pi + 1', 'answer': '4.14159', 'tolerance': '0.00001', 'math_display': True, } status_indicators = { 'correct': ['div.correct'], 'incorrect': ['div.incorrect'], 'unanswered': ['div.unanswered', 'div.unsubmitted'], } def setUp(self, *args, **kwargs): """ Additional setup for NumericalProblemTypeTest """ super(NumericalProblemTypeTest, self).setUp(*args, **kwargs) self.problem_page.a11y_audit.config.set_rules({ 'ignore': [ 'section', # TODO: AC-491 'label', # TODO: AC-289 ] }) def answer_problem(self, correct): """ Answer numerical problem. """ textvalue = "pi + 1" if correct else str(random.randint(-2, 2)) self.problem_page.fill_answer(textvalue) class FormulaProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin): """ TestCase Class for Formula Problem Type """ problem_name = 'FORMULA TEST PROBLEM' problem_type = 'formula' factory = FormulaResponseXMLFactory() factory_kwargs = { 'question_text': 'The solution is [mathjax]x^2+2x+y[/mathjax]', 'sample_dict': {'x': (-100, 100), 'y': (-100, 100)}, 'num_samples': 10, 'tolerance': 0.00001, 'math_display': True, 'answer': 'x^2+2*x+y', } status_indicators = { 'correct': ['div.correct'], 'incorrect': ['div.incorrect'], 'unanswered': ['div.unanswered', 'div.unsubmitted'], } def setUp(self, *args, **kwargs): """ Additional setup for FormulaProblemTypeTest """ super(FormulaProblemTypeTest, self).setUp(*args, **kwargs) self.problem_page.a11y_audit.config.set_rules({ 'ignore': [ 'section', # TODO: AC-491 'label', # TODO: AC-288 ] }) def answer_problem(self, correct): """ Answer formula problem. 
""" textvalue = "x^2+2*x+y" if correct else 'x^2' self.problem_page.fill_answer(textvalue) class ScriptProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin): """ TestCase Class for Script Problem Type """ problem_name = 'SCRIPT TEST PROBLEM' problem_type = 'script' factory = CustomResponseXMLFactory() factory_kwargs = { 'question_text': 'Enter two integers that sum to 10.', 'cfn': 'test_add_to_ten', 'expect': '10', 'num_inputs': 2, 'script': textwrap.dedent(""" def test_add_to_ten(expect,ans): try: a1=int(ans[0]) a2=int(ans[1]) except ValueError: a1=0 a2=0 return (a1+a2)==int(expect) """), } status_indicators = { 'correct': ['div.correct'], 'incorrect': ['div.incorrect'], 'unanswered': ['div.unanswered', 'div.unsubmitted'], } def setUp(self, *args, **kwargs): """ Additional setup for ScriptProblemTypeTest """ super(ScriptProblemTypeTest, self).setUp(*args, **kwargs) self.problem_page.a11y_audit.config.set_rules({ 'ignore': [ 'section', # TODO: AC-491 'label', # TODO: AC-287 ] }) def answer_problem(self, correct): """ Answer script problem. """ # Correct answer is any two integers that sum to 10 first_addend = random.randint(-100, 100) second_addend = 10 - first_addend # If we want an incorrect answer, then change # the second addend so they no longer sum to 10 if not correct: second_addend += random.randint(1, 10) self.problem_page.fill_answer(first_addend, input_num=0) self.problem_page.fill_answer(second_addend, input_num=1) class CodeProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin): """ TestCase Class for Code Problem Type """ problem_name = 'CODE TEST PROBLEM' problem_type = 'code' factory = CodeResponseXMLFactory() factory_kwargs = { 'question_text': 'Submit code to an external grader', 'initial_display': 'print "Hello world!"', 'grader_payload': '{"grader": "ps1/Spring2013/test_grader.py"}', } status_indicators = { 'correct': ['.grader-status .correct ~ .debug'], 'incorrect': ['.grader-status .incorrect ~ .debug'], 'unanswered': ['.grader-status .unanswered ~ .debug'], } def setUp(self, *args, **kwargs): """ Additional setup for CodeProblemTypeTest """ super(CodeProblemTypeTest, self).setUp(*args, **kwargs) self.problem_page.a11y_audit.config.set_rules({ 'ignore': [ 'section', # TODO: AC-491 'label', # TODO: AC-286 ] }) def answer_problem(self, correct): """ Answer code problem. """ # The fake xqueue server is configured to respond # correct / incorrect no matter what we submit. # Furthermore, since the inline code response uses # JavaScript to make the code display nicely, it's difficult # to programatically input text # (there's not <textarea> we can just fill text into) # For this reason, we submit the initial code in the response # (configured in the problem XML above) pass def test_answer_incorrectly(self): """ Overridden for script test because the testing grader always responds with "correct" """ pass def test_submit_blank_answer(self): """ Overridden for script test because the testing grader always responds with "correct" """ pass def test_cant_submit_blank_answer(self): """ Overridden for script test because the testing grader always responds with "correct" """ pass class ChoiceTextProbelmTypeTestBase(ProblemTypeTestBase): """ Base class for "Choice + Text" Problem Types. (e.g. RadioText, CheckboxText) """ choice_type = None def _select_choice(self, input_num): """ Selects the nth (where n == input_num) choice of the problem. 
""" self.problem_page.q( css='div.problem input.ctinput[type="{}"]'.format(self.choice_type) ).nth(input_num).click() def _fill_input_text(self, value, input_num): """ Fills the nth (where n == input_num) text input field of the problem with value. """ self.problem_page.q( css='div.problem input.ctinput[type="text"]' ).nth(input_num).fill(value) def answer_problem(self, correct): """ Answer radio text problem. """ choice = 0 if correct else 1 input_value = "8" if correct else "5" self._select_choice(choice) self._fill_input_text(input_value, choice) class RadioTextProblemTypeTest(ChoiceTextProbelmTypeTestBase, ProblemTypeTestMixin): """ TestCase Class for Radio Text Problem Type """ problem_name = 'RADIO TEXT TEST PROBLEM' problem_type = 'radio_text' choice_type = 'radio' factory = ChoiceTextResponseXMLFactory() factory_kwargs = { 'question_text': 'The correct answer is Choice 0 and input 8', 'type': 'radiotextgroup', 'choices': [ ("true", {"answer": "8", "tolerance": "1"}), ("false", {"answer": "8", "tolerance": "1"}), ], } status_indicators = { 'correct': ['section.choicetextgroup_correct'], 'incorrect': ['section.choicetextgroup_incorrect', 'span.incorrect'], 'unanswered': ['span.unanswered'], } def setUp(self, *args, **kwargs): """ Additional setup for RadioTextProblemTypeTest """ super(RadioTextProblemTypeTest, self).setUp(*args, **kwargs) self.problem_page.a11y_audit.config.set_rules({ 'ignore': [ 'section', # TODO: AC-491 'label', # TODO: AC-285 'radiogroup', # TODO: AC-285 ] }) class CheckboxTextProblemTypeTest(ChoiceTextProbelmTypeTestBase, ProblemTypeTestMixin): """ TestCase Class for Checkbox Text Problem Type """ problem_name = 'CHECKBOX TEXT TEST PROBLEM' problem_type = 'checkbox_text' choice_type = 'checkbox' factory = ChoiceTextResponseXMLFactory() factory_kwargs = { 'question_text': 'The correct answer is Choice 0 and input 8', 'type': 'checkboxtextgroup', 'choices': [ ("true", {"answer": "8", "tolerance": "1"}), ("false", {"answer": "8", "tolerance": "1"}), ], } def setUp(self, *args, **kwargs): """ Additional setup for CheckboxTextProblemTypeTest """ super(CheckboxTextProblemTypeTest, self).setUp(*args, **kwargs) self.problem_page.a11y_audit.config.set_rules({ 'ignore': [ 'section', # TODO: AC-491 'label', # TODO: AC-284 'checkboxgroup', # TODO: AC-284 ] }) class ImageProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin): """ TestCase Class for Image Problem Type """ problem_name = 'IMAGE TEST PROBLEM' problem_type = 'image' factory = ImageResponseXMLFactory() can_submit_blank = True factory_kwargs = { 'src': '/static/images/placeholder-image.png', 'rectangle': '(0,0)-(50,50)', } def answer_problem(self, correct): """ Answer image problem. 
""" offset = 25 if correct else -25 input_selector = ".imageinput [id^='imageinput_'] img" input_element = self.problem_page.q(css=input_selector)[0] chain = ActionChains(self.browser) chain.move_to_element(input_element) chain.move_by_offset(offset, offset) chain.click() chain.perform() class SymbolicProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin): """ TestCase Class for Symbolic Problem Type """ problem_name = 'SYMBOLIC TEST PROBLEM' problem_type = 'symbolicresponse' factory = SymbolicResponseXMLFactory() factory_kwargs = { 'expect': '2*x+3*y', } status_indicators = { 'correct': ['span div.correct'], 'incorrect': ['span div.incorrect'], 'unanswered': ['span div.unanswered'], } def setUp(self, *args, **kwargs): """ Additional setup for SymbolicProblemTypeTest """ super(SymbolicProblemTypeTest, self).setUp(*args, **kwargs) self.problem_page.a11y_audit.config.set_rules({ 'ignore': [ 'section', # TODO: AC-491 'label', # TODO: AC-294 ] }) def answer_problem(self, correct): """ Answer symbolic problem. """ choice = "2*x+3*y" if correct else "3*a+4*b" self.problem_page.fill_answer(choice)
longmen21/edx-platform
common/test/acceptance/tests/lms/test_problem_types.py
Python
agpl-3.0
28,323
[ "VisIt" ]
dee0f035812a7f77dfab4a3e7054613e0bf50319a78ac1551446c028733f2c35
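The required-attribute enforcement performed by ProblemTypeTestBaseMeta above is a reusable pattern; the sketch below strips it down to a minimal, self-contained form. The class and attribute names are illustrative only, and the __metaclass__ assignment follows the Python 2 idiom used by the test module itself.

class RequiredAttrsMeta(type):
    """Raise NotImplementedError if an instance leaves a required attribute unset."""
    required_attrs = ('problem_name', 'problem_type')

    def __call__(cls, *args, **kwargs):
        obj = type.__call__(cls, *args, **kwargs)
        for attr in cls.required_attrs:
            if getattr(obj, attr, None) is None:
                raise NotImplementedError(
                    '%s is a required attribute for %s' % (attr, cls.__name__))
        return obj


class DemoProblemTest(object):
    __metaclass__ = RequiredAttrsMeta   # Python 2 style, as in the module above
    problem_name = 'DEMO PROBLEM'
    problem_type = 'demo'


DemoProblemTest()   # passes: both required attributes are defined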
""" (This script is independent from lfc_dfc_copy) This script is used to migrate the content of the LFC DB to the DFC DB when used with Stored procedure and Foreign keys. It won't work with the other schema. It is the central component of the migration. * Please read the doc of each method in this script, there are important informations. * You need to have a "clean" LFC db. That is, the LFC does not enforce consistency, and for example a file can belong to a user which is not in the DB. This will not work in the DFC. Hence, you must make sure you have no inconsistencies what so ever in the LFC. * This script assumes that you already created the DFC DB, with the schema. * While performing the migration, the LFC must be in read-only. I am speaking here about the LFC service, not only in the DIRAC service. There should be *zero* insertion in the LFC or the DFC dbs while running this script. Read further the migration explanations for more details. * This script must be executed with a user that can execute the DIRAC method getUsernameForDN and getGroupsWithVOMSAttribute. * The script is doing 2 consistency checks at the end. One checks real inconsistencies within the DFC (see 'databaseIntegrityCheck'), while the second one compares the amount of entities in the LFC with respect to the DFC (see 'compareNumbers'). Please pay attention to the doc of these methods. * A final step is to be done once the migration is over. You need to call the procedure 'ps_rebuild_directory_usage'. Indeed, during the migration, the storage usage is not updated. So if you are happy with the report of the consistency checks, go for it. * I strongly recommend to do a snapshot of your LFC DB to try the migration script multiple times before hand The global idea of the migration is as follow: * Put the LFC service in read-only mode (it doesn't harm, while write do...) * use this script to copy the data * Put again the LFC service in read-write, and set the LFC Write in the CS * The DFC should be put in Read-Write, so LFC and DFC will be in sync, but we read only from the DFC. When after few weeks, you are sure that the migration was successful, get rid of the LFC. Having 2 master catalogs is not a good idea... As for the migration itself. Here are some tips and recommendations. * While it is possible to keep all the system running, we STRONGLY recommend to have a deep downtime. * Declare ahead of time the DFC DB and services in the CS. * One day before, start draining the system (avoid new jobs submission and so on). * Just before starting the migration: - Stop the FTSAgent and the RequestExecutingAgent. - Set the LFC service in read only - go for the deep downtime, stop all the agents basically. (Caution, don't stop the ReqManager service, it is important to have it !) * Perform the migration: - run this script - if happy with the result, rebuild the storage usage * Restarting phase: - Mark the LFC in Read only in the CS - Declare the DFC as Read-Write catalog and Master. Also, it should be the first master catalog. - start the DFC services - Hotfix the ReqManager (see bellow) - restart *all* the services that interact with the DMS, so that they pick up the new configuration - you can restart all the agents If the migration is done this way: - the read action are never perturbed - the synchronous writes (with script) will fail - the jobs that are still running during the migration and that will attempt a write will end up creating a Request. A hotfix in the ReqManager is necessary though. 
During the migration, some RMS Requests might have been created that concern registrations for the
master catalog. For the jobs running during the migration, the master catalog was only the LFC;
however, you want these Requests, when they are executed, to also be done in the DFC. The best way
to do that is to hotfix the ReqManager, so that if an operation is targeted at the LFC, it will be
modified to be targeted at both the LFC and the DFC. Provided you called your LFC
'LcgFileCatalogCombined', the fix is to be put in the Catalog setter of
RequestManagementSystem/Client/Operation.py and would look something like:

  if value == "LcgFileCatalogCombined":
    value = "FileCatalog,LcgFileCatalogCombined"

Note that jobs that already have the new configuration will create 2 operations, one targeted at
the DFC, the other one at the LFC. But with this fix, you will end up with one targeted at the DFC,
and the other one targeted at the LFC and the DFC. However, it should do no harm. But remember it
when debugging... Anyway, this is only useful for Requests that were created during or before the
downtime, or by jobs started with the old configuration. So a relatively limited amount of time...

GOOD LUCK !
"""

# source /afs/cern.ch/project/oracle/script/setoraenv.sh setoraenv -s 11203
# source /afs/cern.ch/project/oracle/script/setoraenv.csh setoraenv -s 11203

from DIRAC.Core.Base import Script
Script.parseCommandLine()

import cx_Oracle
import time
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getUsernameForDN, getGroupsWithVOMSAttribute
import os
from datetime import datetime
import MySQLdb as mdb
import multiprocessing

prodDbAccount = 'USERNAME'
prodDbAccountPassword = 'PASSWORD'
prodDbTNS = 'DB'

dfc_db_host = 'HOST'
dfc_db_port = 3306
dfc_db_user = 'USER'
dfc_db_passwd = 'PASSWORD'
dfc_db_name = 'FileCatalogDB'
dfc_unix_socket = '/opt/dirac/mysql/db/mysql.sock'

# Directory where all the logs are written
logDir = '/tmp/migration'

# Set this flag to True if you just want to test the beginning
# of the migration procedure, and see if things are actually
# being copied over
earlyStopEnabled = False
# How many files + directories you will insert before stopping
earlyStop = 10000

# We are not committing every insert for performance reasons.
# This tells you how many queries each thread will execute before
# issuing a commit
commitThreshold = 10000

# These tables are used to convert the LFC files/replicas statuses
# to the DFC ones.
# In the DFC DB, FakeStatus already exists, so these statuses
# start to be numbered at 2
dirac_status = ['AprioriGood', 'Trash', 'Removing', 'Probing']
lfc_dfc_status = { 'P' : dirac_status.index( 'Trash' ) + 2 }

# This is a cache that contains the mapping between
# the uid found in the LFC and a dirac username
uid_name = {}

# Converts the mode to a posix mode permission
def S_IMODE( mode ):
  return mode & 07777

# Utilities to make the distinction between files and dirs in the LFC
def S_IFMT( mode ):
  return mode & 0170000

S_IFDIR = 0040000

def isDir( mode ):
  """ True if mode indicates a directory """
  return S_IFMT( mode ) == S_IFDIR

def getUsersGroupsAndSEs( queryQueue, name_id_se ):
  """ This dumps the users, groups and SEs from the LFC, converts them into DIRAC groups,
      and creates the queries to insert them into the DFC.

      WATCH OUT !! The DIRAC group is evaluated from the VOMS attribute, since this is what is
      used in the LFC. So if you have several DIRAC groups that have the same voms role, the
      assigned group will be the first one...
:param queryQueue : queue in which to put the SQL statement to be executed against the DFC :param name_id_se : cache to be filled in with the mapping {seName:id in the DFC} """ connection = cx_Oracle.Connection( prodDbAccount, prodDbAccountPassword, prodDbTNS, threaded = True ) fromdbR_ = connection.cursor() fromdbR_.arraysize = 1000 # First, fetch all the UID and DN from the LFC if fromdbR_.execute( "SELECT USERID, USERNAME from CNS_USERINFO" ): rows = fromdbR_.fetchall() for row in rows: uid = row[0] dn = row[1] name = uid_name.get( uid ) # If the name is not in cache, if not name: # We do some mangling of the DN to try to have sensitive name whatever happen # We also include the UID in case the name is not unique, or several DN are associated namePart = 'name' idPart = '' dnSplit = dn.split( 'CN=' ) try: namePart = dnSplit[-1].replace( "'", " " ) except Exception as e: pass try: idPart = dnSplit[2].replace( "/", "" ) except Exception as e: pass idPart += ' uid_%s' % uid name = "Unknown (%s %s)" % ( namePart, idPart ) # Now, we do try to convert the DN into a DIRAC username if "Unknown" in name: result = getUsernameForDN( dn ) if result['OK']: name = result['Value'] uid_name[uid] = name # Now we prepare the SQL statement to insert them into the DFC for uid in uid_name: username = uid_name[uid] queryQueue.put( "INSERT INTO FC_Users(UID, UserName) values (%s, '%s');\n" % ( uid, username ) ) # Now, same principle on the group if fromdbR_.execute( "SELECT GID, GROUPNAME from CNS_GROUPINFO" ): rows = fromdbR_.fetchall() for row in rows: gid = row[0] gn = row[1] groupname = gn # CAUTION: as explained in the docstring, if multiple groups share the same voms role # we take the first one groups = getGroupsWithVOMSAttribute( '/' + gn ) if groups: groupname = groups[0] queryQueue.put( "INSERT INTO FC_Groups(GID, GroupName) values (%s, '%s');\n" % ( gid, groupname ) ) # We build a cache that contains the mapping between the name and its ID in the DFC DB # The ID starts at 2 because ID=1 is taken by FakeSe seCounter = 2 # Fetch the name from the LFC if fromdbR_.execute( "select unique HOST from CNS_FILE_REPLICA" ): rows = fromdbR_.fetchall() for row in rows: seName = row[0] # Populate the SE cache name_id_se[seName] = seCounter # Create the query for the DFC queryQueue.put( "INSERT INTO FC_StorageElements(SEID, SEName) values (%s, '%s');\n" % ( seCounter, seName ) ) seCounter += 1 # Also here we just insert all the statuses defined above for status in dirac_status: queryQueue.put( "INSERT INTO FC_Statuses (Status) values ('%s');\n" % status ) fromdbR_.close() connection.close() # Set the poison pill in the queue queryQueue.put( None ) return def getDirAndFileData( fileQueue, dirQueue, dirClosureQueryQueue ): """ This fetches the Files and Directories from the LFC, and prepare the necessary SQL statement to insert them into the DFC. The File and Directory ids are kept consistent between the LFC and the DFC. Warning: don't worry if this starts and takes ages before starting printing anything. 
It is because it is doing the SYS_CONNECT_BY_PATH query, which takes a long time to return :param fileQueue : queue in which to put the values to be inserted for the files :param dirQueue : queue in which to put the values to be inserted for the directories :param dirClosureQueryQueue : queue in which to put the insert statement for the directory closure table """ print "getDirAndFileData start" startTime = time.time() stepCounter = 0 fileCounter = 0 dirCounter = 0 connection = cx_Oracle.Connection( prodDbAccount, prodDbAccountPassword, prodDbTNS, threaded = True ) fromdbR_ = connection.cursor() fromdbR_.arraysize = 1000 # Parameters we query. # It is to be noted that Files and directories are stored in the same table in the LFC, # hence we query, and then we have to make the distinction directoryParameters = ['fileid', 'parent_fileid', 'guid', "SYS_CONNECT_BY_PATH(name, '/') path", 'LEVEL lvl', 'filemode', 'nlink', 'owner_uid', 'gid', 'filesize', 'atime', 'mtime', 'ctime', 'status', 'csumtype', 'csumvalue' ] directoryCommand = "SELECT %s from CNS_FILE_METADATA START WITH fileid=3 \ CONNECT BY NOCYCLE PRIOR fileid=parent_fileid order by lvl asc,\ parent_fileid" % ( ','.join( directoryParameters ) ) # These variables are the index of the various information in the tuples returned by Oracle CNS_FILE_MODE = directoryParameters.index( 'filemode' ) CNS_FILE_FILEID = directoryParameters.index( 'fileid' ) CNS_FILE_PARENTID = directoryParameters.index( 'parent_fileid' ) CNS_FILE_STATUS = directoryParameters.index( 'status' ) CNS_FILE_SIZE = directoryParameters.index( 'filesize' ) CNS_FILE_UID = directoryParameters.index( 'owner_uid' ) CNS_FILE_GID = directoryParameters.index( 'gid' ) CNS_FILE_NAME = directoryParameters.index( "SYS_CONNECT_BY_PATH(name, '/') path" ) CNS_FILE_GUID = directoryParameters.index( 'guid' ) CNS_FILE_CHECKSUM = directoryParameters.index( 'csumvalue' ) CNS_FILE_CTIME = directoryParameters.index( 'ctime' ) CNS_FILE_MTIME = directoryParameters.index( 'mtime' ) if fromdbR_.execute( directoryCommand ): done = False batchSize = 1000 # Size of the batch that are retrieved at once # The insertion is done in 2 times (part for the files, ther other for directories) while not done: print "getDirAndFileData step %s elapsed time %s dir %s files %s" % ( stepCounter, time.time() - startTime, dirCounter, fileCounter ) stepCounter += 1 # If doing an early stop if earlyStopEnabled and ( fileCounter + dirCounter ) >= earlyStop: done = True rows = fromdbR_.fetchmany( batchSize ) # We prepare list of tuples of values to be inserted at once fileTuple = [] dirTuple = [] # If nothing is returned anymore, we are done. if rows == [] : done = True else: for row in rows: cns_file_mode = row[CNS_FILE_MODE] cns_file_id = row[CNS_FILE_FILEID] cns_parent_id = row[CNS_FILE_PARENTID] cns_uid = row[CNS_FILE_UID] cns_gid = row[CNS_FILE_GID] # we ignore the real / so that /grid becomes the new root if cns_parent_id == 0: continue # The LFC only stores the current path level, but the query above # returned a full path. # We want to get ride of the '/grid' that was mandatory in the LFC cns_name = cns_name = os.path.realpath( row[CNS_FILE_NAME] ) if cns_name.startswith( '/grid/' ): cns_name = cns_name[5:] elif cns_name == '/grid': cns_name = '/' cns_parent_id = 0 # Convert the status from the LFC to the DFC. 
# Basically, 'P' becomes Trash, and the rest becomes 'APrioriGood' dfc_status = lfc_dfc_status.get( row[CNS_FILE_STATUS], 2 ) # Convert the LFC mode to posix mode for the DFC dfc_mode = S_IMODE( int( row[CNS_FILE_MODE] ) ) # Creation and modification time. # (nb: I have doubts about how trustable are these dates...) dfc_cdate = datetime.utcfromtimestamp( row[CNS_FILE_CTIME] ).strftime( '%Y-%m-%d %H:%M:%S' ) dfc_mdate = datetime.utcfromtimestamp( row[CNS_FILE_MTIME] ).strftime( '%Y-%m-%d %H:%M:%S' ) # It's a file if not isDir( cns_file_mode ): fileCounter += 1 # Get the size cns_file_size = row[CNS_FILE_SIZE] # In the file table, we only store the actual file name, not the path dfc_name = os.path.basename( cns_name ) # Insert the tuple of values to be inserted in the File table fileTuple.append( "(%s, %s, %s, %s, %s, %s, '%s','%s','%s','%s','%s','%s','%s',%s)" % ( cns_file_id, cns_parent_id, cns_file_size, cns_uid, cns_gid, dfc_status, dfc_name, row[CNS_FILE_GUID], row[CNS_FILE_CHECKSUM], 'Adler32', 'File', dfc_cdate, dfc_mdate, dfc_mode ) ) else: dirCounter += 1 # Insert the tuple of values to be inserted in the directory table dirTuple.append( "(%s,'%s',%s,%s,'%s','%s',%s,%s)" % ( cns_file_id, cns_name, cns_uid, cns_gid, dfc_cdate, dfc_mdate, dfc_mode, dfc_status ) ) # Insert the statement in the directory closure table for the directory to itself sqlDirClosure = "INSERT INTO FC_DirectoryClosure \ (ParentID, ChildID, Depth )\ VALUES (%s,%s, 0);" % ( cns_file_id, cns_file_id ) dirClosureQueryQueue.put( "%s\n" % sqlDirClosure ) # If we have a parent, we must also insert all the hierarchy in teh closure table if cns_parent_id: sqlDirClosureSub = "INSERT INTO FC_DirectoryClosure \ (ParentID, ChildID, depth) \ SELECT p.ParentID, %s, p.depth + 1 \ FROM FC_DirectoryClosure p \ WHERE p.ChildID = %s;" % ( cns_file_id, cns_parent_id ) # fileAndDirInsert.write( "%s\n" % sqlDirClosureSub ) dirClosureQueryQueue.put( "%s\n" % sqlDirClosureSub ) print "getDirAndFileData put in queues file %s" % ( len( fileTuple ) ) # Add the list of tuples to be inserted to the queues if fileTuple: fileQueue.put( fileTuple ) if dirTuple: dirQueue.put( dirTuple ) print "getDirAndFileData done. elapsed time %s dir %s files %s" % ( time.time() - startTime, dirCounter, fileCounter ) fromdbR_.close() connection.close() # Poison pills fileQueue.put( None ) dirQueue.put( None ) dirClosureQueryQueue.put( None ) return def getReplicaData( replicaQueue, name_id_se ): """ This fetches the replicas from the LFC, and prepare the values to be inserted in the DFC The Replica ID is kept consistent between the LFC and the DFC. 
:param replicaQueue: queue in which we insert the tuples of values to be inserted :param name_id_se: mapping between {SEName:id in the DFC} that was built earlier """ print "getReplicaData start" startTime = time.time() stepCounter = 0 replicaCounter = 0 # This is the ID we assign to the Replica # Not strictly necessary because of auto increment cns_rep_id = 1 connection = cx_Oracle.Connection( prodDbAccount, prodDbAccountPassword, prodDbTNS, threaded = True ) fromdbR_ = connection.cursor() fromdbR_.arraysize = 1000 # Replica parameters we are interested in replicaParameters = ['FileId', 'NBACCESSES', 'ATIME', 'PTIME', 'Status', 'F_TYPE', 'POOLNAME', 'Host', 'FS', 'SFN', 'CTIME', 'LTIME', 'R_TYPE', 'SETNAME', 'XATTR'] getReplicaCommand = "SELECT %s from CNS_FILE_REPLICA" % ( ','.join( replicaParameters ) ) # These variables are the index of the parameters in the tuples # returned by the LFC CNS_REP_FILEID = replicaParameters.index( 'FileId' ) CNS_REP_STATUS = replicaParameters.index( 'Status' ) CNS_REP_SFN = replicaParameters.index( 'SFN' ) CNS_REP_CTIME = replicaParameters.index( 'CTIME' ) CNS_REP_MTIME = replicaParameters.index( 'ATIME' ) CNS_REP_HOST = replicaParameters.index( 'Host' ) if fromdbR_.execute( getReplicaCommand ): done = False # Size of the batch that are retrieved and inserted at once batchSize = 1000 while not done: print "getReplicaData step %s elapsed time %s replica %s" % ( stepCounter, time.time() - startTime, replicaCounter ) stepCounter += 1 # If early stop enabled if earlyStopEnabled and ( replicaCounter ) >= earlyStop: done = True # list of tuples that will be inserted at once repTuple = [] rows = fromdbR_.fetchmany( batchSize ) # If there is nothing else returned, we are done if rows == []: done = True else: for row in rows: replicaCounter += 1 # Get the SSE ID dfc_rep_seId = name_id_se.get( row[CNS_REP_HOST] ) # Since the file id were kept consistent, we keep the same cns_file_id = row[CNS_REP_FILEID] # Conversion of the status, just for the file dfc_status = lfc_dfc_status.get( row[CNS_REP_STATUS], 2 ) # Modification time dfc_mdate = datetime.utcfromtimestamp( row[CNS_REP_MTIME] ).strftime( '%Y-%m-%d %H:%M:%S' ) # Creation time # sometimes, row[CNS_REP_CTIME] is None try: dfc_cdate = datetime.utcfromtimestamp( row[CNS_REP_CTIME] ).strftime( '%Y-%m-%d %H:%M:%S' ) except: dfc_cdate = dfc_mdate # The PFN (even if arguably useful now...) cns_rep_sfn = row[CNS_REP_SFN] # Add the values to the list of tuple repTuple.append( "(%s,%s,%s,%s,'%s','%s','%s')" % ( cns_rep_id, cns_file_id, dfc_rep_seId, dfc_status, dfc_cdate, dfc_mdate, cns_rep_sfn ) ) cns_rep_id += 1 print "getReplicaData add replicaQueue %s" % ( len( repTuple ) ) # Add the values to be inserted if repTuple: replicaQueue.put( repTuple ) print "getReplicaData done. elapsed time %s replicas %s" % ( time.time() - startTime, replicaCounter ) fromdbR_.close() connection.close() # Poison pill replicaQueue.put( None ) return def loadDataInMySQL( queryQueue, workerId ): """ This generic method just executes whatever SQL query is put in the queue. It stops when getting the poison pill (None). 
The execution is done in transactions, and we disable to foreign key checking :param queryQueue: queue that contains the query :param workerID : whatever name/id for prints """ con = mdb.connect( host = dfc_db_host, port = dfc_db_port, user = dfc_db_user, passwd = dfc_db_passwd, db = dfc_db_name, unix_socket = dfc_unix_socket ) cur = con.cursor() con.autocommit( False ) cur.execute( "START TRANSACTION;" ) cur.execute( "SET FOREIGN_KEY_CHECKS = 0;" ) queryExecuted = 0 logfile = open( os.path.join( logDir, "worker%s.txt" % workerId ), 'w' ) while True: next_query = queryQueue.get() # If we go the poison pill, # commit all, set back the foreign key check # close the connections, and bye bye if next_query is None: cur.execute( "COMMIT;" ) cur.execute( "SET FOREIGN_KEY_CHECKS = 1;" ) con.commit() con.autocommit( True ) cur.close() con.close() print "loadDataInMySQL EXITING " logfile.write( "loadDataInMySQL EXITING " ) logfile.close() return # If we reach the threshold, we commit if queryExecuted % commitThreshold == 0: cur.execute( "COMMIT;" ) con.commit() cur.execute( "START TRANSACTION;" ) # We try to execute the query # If ever it fail for a reason or another, we try again # If it fails gain, we ignore this query try: cur.execute( next_query ) except Exception as e: print "worker %s : EXCEPTION %s\nworker %s :QUERY %s" % ( workerId, e, workerId, next_query ) print "worker %s trying again " % workerId logfile.write( "worker %s : EXCEPTION %s\nworker %s :QUERY %s" % ( workerId, e, workerId, next_query ) ) logfile.write( "worker %s trying again " % workerId ) try: cur.execute( next_query ) except Exception, ee: print "worker %s COMPLETELY FAILED " % workerId print "worker %s : EXCEPTION %s\nworker %s :QUERY %s" % ( workerId, e, workerId, next_query ) logfile.write( "worker %s COMPLETELY FAILED " % workerId ) logfile.write( "worker %s : EXCEPTION %s\nworker %s :QUERY %s" % ( workerId, e, workerId, next_query ) ) queryExecuted += 1 # Tell we are done with this task queryQueue.task_done() if queryExecuted % 10000 == 0: print "worker %s (%s) : %s" % ( workerId, queryExecuted, next_query ) logfile.write( "worker %s (%s) : %s" % ( workerId, queryExecuted, next_query ) ) logfile.close() def loadTupleDataInMySQL( queryQueue, workerId, querybase ): """ This generic method executes an insert query given in querybase argument using the values given in the queryQueue. The execution is done in transactions, and we disable to foreign key checking :param queryQueue: queue that contains the values as a list of string tuples (e.g. [ '(1,2)', '(3,4)']) :param workerID : whatever name/id for prints :param querybase: base of the insert sql statement (e.g. "INSERT INTO myTable (x,y) values ") """ con = mdb.connect( host = dfc_db_host, port = dfc_db_port, user = dfc_db_user, passwd = dfc_db_passwd, db = dfc_db_name, unix_socket = dfc_unix_socket ); cur = con.cursor() cur.execute( "SET FOREIGN_KEY_CHECKS = 0;" ) queryExecuted = 0 logfile = open( os.path.join( logDir, "worker%s.txt" % workerId ), 'w' ) workerStart = time.time() while True: next_tuple = queryQueue.get() # If we get the poison pill, clean and exit if next_tuple is None: print "worker %s : got poison pill. elapsed time %s" % ( workerId, time.time() - workerStart ) logfile.write( "worker %s : got poison pill. 
elapsed time %s\n" % ( workerId, time.time() - workerStart ) ) cur.execute( "SET FOREIGN_KEY_CHECKS = 1;" ) cur.close() con.close() print "loadDataInMySQL %s EXITING " % workerId logfile.write( "loadDataInMySQL %s EXITING " % workerId ) logfile.close() return print "worker %s : got %s" % ( workerId, len( next_tuple ) ) logfile.write( "worker %s : got %s\n" % ( workerId, len( next_tuple ) ) ) # Build the query and execute # If it fails, try again # If it fails again, forget about that one try: next_query = querybase + ','.join( next_tuple ) + ';' cur.execute( next_query ) con.commit() except Exception as e: print "worker %s : EXCEPTION %s\nworker %s :QUERY %s" % ( workerId, e, workerId, next_query ) print "worker %s trying again " % workerId logfile.write( "worker %s : EXCEPTION %s\nworker %s :QUERY %s" % ( workerId, e, workerId, next_query ) ) logfile.write( "worker %s trying again " % workerId ) try: cur.execute( next_query ) con.commit() except Exception, ee: print "worker %s COMPLETELY FAILED " % workerId print "worker %s : EXCEPTION %s " % ( workerId, e ) logfile.write( "worker %s COMPLETELY FAILED " % workerId ) logfile.write( "worker %s : EXCEPTION %s\nworker %s :QUERY %s" % ( workerId, e, workerId, next_query ) ) queryExecuted += len( next_tuple ) queryQueue.task_done() if queryExecuted % 10000 == 0: print "worker %s (%s) elapsed time %s" % ( workerId, queryExecuted, time.time() - workerStart ) logfile.write( "worker %s (%s) elapsed time %s " % ( workerId, queryExecuted, time.time() - workerStart ) ) logfile.close() # Admin has ID 0 in LFC, 1 in DFC def updateAdminID(): """ The 'root'/admin has an ID 0 in the LFC but 1 in the DFC. So we make the update. """ con = mdb.connect( host = dfc_db_host, port = dfc_db_port, user = dfc_db_user, passwd = dfc_db_passwd, db = dfc_db_name, unix_socket = dfc_unix_socket ); cur = con.cursor() workerId = "updateAdminID" logfile = open( os.path.join( logDir, "worker%s.txt" % workerId ), 'w' ) updateQueries = { "Update FC_Files" : "Update FC_Files set UID = 1, GID = 1 where UID = 0 and GID = 0", "Update FC_DirectoryList" : "Update FC_DirectoryList set UID = 1, GID = 1 where UID = 0 and GID = 0", } for desc, query in updateQueries.items(): print "worker %s : %s (%s)" % ( workerId, desc, query ) logfile.write( "worker %s : %s (%s)\n" % ( workerId, desc, query ) ) cur.execute( query ) con.commit() cur.close() con.close() print "updateAdminID EXITING " logfile.write( "updateAdminID EXITING " ) logfile.close() return def databaseIntegrityCheck(): """ This does some integrity check: * If some users have no files/directories (expect some...) * If some files belong to non existing users (should not be !) * If some directories belong to non existing users (should not be !) * If some groups have no files/directories (expect some...) * If some files belong to non existing groups (should not be !) * If some directories belong to non existing groups (should not be !) It just prints the output, does not take action. 
""" con = mdb.connect( host = dfc_db_host, port = dfc_db_port, user = dfc_db_user, passwd = dfc_db_passwd, db = dfc_db_name, unix_socket = dfc_unix_socket ); cur = con.cursor() workerId = "dbCheck" logfile = open( os.path.join( logDir, "worker%s.txt" % workerId ), 'w' ) integrityQueries = { "Useless Users" : " SELECT u.* from FC_Users u\ LEFT OUTER JOIN\ (select distinct(UID)\ From FC_DirectoryList\ UNION\ select distinct(UID)\ from FC_Files) i\ ON u.UID = i.UID\ WHERE i.UID IS NULL", "Files with Non existing users" : "SELECT f.*\ from FC_Files f\ LEFT OUTER JOIN\ FC_Users u\ ON f.UID = u.UID\ WHERE u.UID IS NULL", "Directories with non existing users" : "SELECT d.*\ FROM FC_DirectoryList d\ LEFT OUTER JOIN\ FC_Users u\ ON d.UID = u.UID\ WHERE u.UID IS NULL", "Useless groups": "SELECT g.*\ FROM FC_Groups g\ LEFT OUTER JOIN\ (SELECT distinct(GID)\ FROM FC_DirectoryList\ UNION\ SELECT distinct(GID)\ FROM FC_Files) i\ ON g.GID = i.GID\ WHERE i.GID IS NULL", "Files with non existing groups" : "SELECT f.*\ FROM FC_Files f\ LEFT OUTER JOIN\ FC_Groups g\ ON f.GID = g.GID\ WHERE g.GID IS NULL", "Directories with non existing groups" : "SELECT d.*\ FROM FC_DirectoryList d\ LEFT OUTER JOIN\ FC_Groups g\ ON d.GID = g.GID\ WHERE g.GID IS NULL" } for desc, query in integrityQueries.items(): print "worker %s : %s (%s)" % ( workerId, desc, query ) logfile.write( "worker %s : %s (%s)\n" % ( workerId, desc, query ) ) cur.execute( query ) rows = cur.fetchall() for row in rows: print "\t%s" % ( row, ) logfile.write( "\t%s\n" % ( row, ) ) cur.close() con.close() print "databaseIntegrityCheck EXITING " logfile.write( "databaseIntegrityCheck EXITING " ) logfile.close() return def compareNumbers(): """ This compares how many entries you have in the LFC and in the DFC. In principle, there should be the same, but there might be reasons why they are not, and with no reasons to worry... trust the other consistency check more! (n.b. : it typically happens when you have 2 replicas for in the same SE for the same file, but with a different 'SFN'. The LFC allowed that... They will now be only one left, the first, but it doesn't matter, since DIRAC recompute the URL anyway ) """ con = mdb.connect( host = dfc_db_host, port = dfc_db_port, user = dfc_db_user, passwd = dfc_db_passwd, db = dfc_db_name, unix_socket = dfc_unix_socket ); cur = con.cursor() connection = cx_Oracle.Connection( prodDbAccount, prodDbAccountPassword, prodDbTNS, threaded = True ) fromdbR_ = connection.cursor() fromdbR_.arraysize = 1000 lfc_filesDir = 0 lfc_replicas = 0 dfc_files = 0 dfc_dir = 0 dfc_replicas = 0 if fromdbR_.execute( "SELECT count(*) from CNS_FILE_METADATA" ): rows = fromdbR_.fetchall() print "rows lfc files %s" % ( rows, ) lfc_filesDir = rows[0][0] if fromdbR_.execute( "SELECT count(*) from CNS_FILE_REPLICA" ): rows = fromdbR_.fetchall() print "rows lfc replicas %s" % ( rows, ) lfc_replicas = rows[0][0] cur.execute( "SELECT count(*) from FC_Files" ) rows = cur.fetchall() print "rows dfc files %s" % ( rows, ) dfc_files = rows[0][0] cur.execute( "SELECT count(*) from FC_DirectoryList" ) rows = cur.fetchall() print "rows dfc dir %s" % ( rows, ) dfc_dir = rows[0][0] cur.execute( "SELECT count(*) from FC_Replicas" ) rows = cur.fetchall() print "rows dfc replicas %s" % ( rows, ) dfc_replicas = rows[0][0] allCounters = [lfc_filesDir, lfc_replicas, dfc_files, dfc_dir, dfc_replicas] for counter in allCounters: if counter: print "OK" else: print "EMPTY COUNTER" if lfc_filesDir != ( dfc_files + dfc_dir + 1 ) : # /grid folder print "ERROR ! 
lfc_filesDir != (dfc_files + dfc_dir + 1) %s != %s" % ( lfc_filesDir, dfc_files + dfc_dir + 1 ) if lfc_replicas != dfc_replicas: print "ERROR ! lfc_replicas != dfc_replicas %s != %s" % ( lfc_replicas, dfc_replicas ) cur.close() con.close() fromdbR_.close() connection.close() return if __name__ == '__main__': startTime = time.time() manager = multiprocessing.Manager() # Mapping between SEnames and its id in the DFC. name_id_se = manager.dict() # List of queues we are going to use queueTab = [] # Queue for Users, Groups and SE # We don't add it in the queue tab because # we need this one to be finished before starting the others ugsQueryQueue = multiprocessing.JoinableQueue() # Queue for the file fileQueryQueue = multiprocessing.JoinableQueue() queueTab.append( fileQueryQueue ) # Queue for the directory dirQueryQueue = multiprocessing.JoinableQueue() queueTab.append( dirQueryQueue ) # Queue for the directory closure dirClosureQueryQueue = multiprocessing.JoinableQueue() queueTab.append( dirClosureQueryQueue ) # Queue for the replicas replicaQueryQueue = multiprocessing.JoinableQueue() queueTab.append( replicaQueryQueue ) print "Starting the Worker processes" # process to execute the query for Users, Groups and SEs workerUgsProcess = multiprocessing.Process( target = loadDataInMySQL, args = ( ugsQueryQueue, 'ugs' ) ) workerUgsProcess.start() # process to insert the values for Files fileBaseQuery = "INSERT IGNORE INTO FC_Files (FileID, DirID, Size, UID, GID, Status,\ Filename, GUID, Checksum, ChecksumType, Type, CreationDate, ModificationDate, Mode) Values " workerFileProcess = multiprocessing.Process( target = loadTupleDataInMySQL, args = ( fileQueryQueue, 'file', fileBaseQuery ) ) workerFileProcess.start() # process to insert the values for Directories dirBaseQuery = "INSERT INTO FC_DirectoryList (DirID, Name, UID,GID, CreationDate,\ ModificationDate, Mode, Status) values " workerDirProcess = multiprocessing.Process( target = loadTupleDataInMySQL, args = ( dirQueryQueue, 'dir', dirBaseQuery ) ) workerDirProcess.start() # process to execute the query for directory closure workerDirClosureProcess = multiprocessing.Process( target = loadDataInMySQL, args = ( dirClosureQueryQueue, 'dirClosure' ) ) workerDirClosureProcess.start() # process to insert the values for Replicas # CAUTION: note the IGNORE statement... 
# This saves us from the double replica on a singel SE replicaBaseQuery = "INSERT IGNORE INTO FC_Replicas (RepID, FileID, SEID, Status,\ CreationDate, ModificationDate, PFN) values " workerReplicaProcess = multiprocessing.Process( target = loadTupleDataInMySQL, args = ( replicaQueryQueue, 'replica', replicaBaseQuery ) ) workerReplicaProcess.start() # First we get dump the Users, Groups and SEs print "Worker processes started" print "Starting the ugsDumpProcess" ugsDumpProcess = multiprocessing.Process( target = getUsersGroupsAndSEs, args = ( ugsQueryQueue, name_id_se ) ) ugsDumpProcess.start() print "ugsDumpProcess started" print "Waiting for ugsDumpProcess to join" ugsDumpProcess.join() print "ugsDumpProcess joined" print "Waiting for empty ugs query queue" # We have to wait to fill in all the caches while not ugsQueryQueue.empty(): print "ugs queue not empty %s" % ( ugsQueryQueue.qsize() ) time.sleep( 5 ) # Now we go :-) # We start getting and inserting in parallel files, directories and replicas print "Queue is empty" print "Start fileDumpProcess" fileDumpProcess = multiprocessing.Process( target = getDirAndFileData, args = ( fileQueryQueue, dirQueryQueue, dirClosureQueryQueue ) ) fileDumpProcess.start() print "fileDumpProcess started" print "Start replicaDumpProcess" replicaDumpProcess = multiprocessing.Process( target = getReplicaData, args = ( replicaQueryQueue, name_id_se ) ) replicaDumpProcess.start() print "replicaDumpProcess started" print "joining fileDumpProcess" fileDumpProcess.join() print "fileDumpProcess joined" print "joining replicaDumpProcess" replicaDumpProcess.join() print "replicaDumpProcess joined" # We wait till everything is empty print "Waiting for empty query queue" allEmpty = False # We have to wait to fill in all the caches while not allEmpty : time.sleep( 5 ) emptyTab = [ q.empty() for q in queueTab] allEmpty = reduce( lambda x, y: x and y, emptyTab, True ) print "Queues not empty %s " % ( [q.qsize() for q in queueTab] ) print "queues should be empty %s" % ( [ q.empty() for q in queueTab] ) print "finished (before join...). Total time : %s" % ( time.time() - startTime ) # Here we are done # Joining the process print "joining worker processes" print "file worker" workerFileProcess.join() print "dir worker" workerDirProcess.join() print "dirClosure worker" workerDirClosureProcess.join() print "replica worker" workerReplicaProcess.join() # Translating admin ID from 0 to 1 print "Updating Admin ID" updateIDStart = time.time() updateAdminID() updateIDTime = time.time() - updateIDStart print "Finished updating AdminID in %s" % updateIDTime # Doing the integrity check print "doing integrity check" integrityStart = time.time() databaseIntegrityCheck() integrityTime = time.time() - integrityStart print "Finished integrity check in %s" % integrityTime # Comparing the numbers print "doing number compare check" numberCompareStart = time.time() compareNumbers() numberCompareTime = time.time() - numberCompareStart print "Finished comparing number in %s" % numberCompareTime # To be clean, we try to join the queues # but here kicks the mystery of queues and multiprocess # in python, so it never ends print "joining queues" print "file queue" fileQueryQueue.join() print "replicas queue" replicaQueryQueue.join() print "queue joined"
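The directory-closure handling in getDirAndFileData above is the part of the script most likely to
need adaptation. The sketch below is a minimal, standalone illustration (hypothetical helper name,
not part of the original script) of the two INSERT statements it builds for every new directory:
a self-referencing row at depth 0, plus one row per ancestor copied from the parent's closure entries.

# Minimal sketch of the closure-table inserts (hypothetical helper, illustrative IDs)
def closure_insert_statements( dir_id, parent_id ):
  """ Return the SQL statements linking a new directory into FC_DirectoryClosure. """
  statements = ["INSERT INTO FC_DirectoryClosure (ParentID, ChildID, Depth) "
                "VALUES (%s, %s, 0);" % ( dir_id, dir_id )]
  if parent_id:
    # Copy every ancestor row of the parent, one level deeper
    statements.append( "INSERT INTO FC_DirectoryClosure (ParentID, ChildID, Depth) "
                       "SELECT p.ParentID, %s, p.Depth + 1 "
                       "FROM FC_DirectoryClosure p WHERE p.ChildID = %s;" % ( dir_id, parent_id ) )
  return statements

# Example: directory 42 created under directory 7
for sql in closure_insert_statements( 42, 7 ):
  print( sql )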
hgiemza/DIRAC
DataManagementSystem/Utilities/lfc_dfc_db_copy.py
Python
gpl-3.0
40,796
[ "DIRAC" ]
abc238699f4be0e0973c969ab112c034b2220cb8bfed8b78cf0d8d35f3941c43
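The migration script above moves data with one producer process and one consumer process per table,
coordinated through multiprocessing.JoinableQueue and a None "poison pill". The following
self-contained sketch (illustrative names only, standard library only) shows that pattern in isolation.

import multiprocessing

def producer( queue ):
  for i in range( 10 ):
    queue.put( "INSERT INTO t VALUES (%s);" % i )
  queue.put( None )  # poison pill: tells the consumer to stop

def consumer( queue ):
  while True:
    item = queue.get()
    if item is None:
      queue.task_done()
      return
    print( item )  # the real script executes the statement against MySQL here
    queue.task_done()

if __name__ == '__main__':
  q = multiprocessing.JoinableQueue()
  p = multiprocessing.Process( target = producer, args = ( q, ) )
  c = multiprocessing.Process( target = consumer, args = ( q, ) )
  p.start(); c.start()
  p.join(); c.join()
  q.join()  # returns immediately: every get() was matched by a task_done()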
#!/usr/bin/env python # Copyright 2001 by Brad Chapman. All rights reserved. # This code is part of the Biopython distribution and governed by its # license. Please see the LICENSE file that should have been included # as part of this package. """Test the creation of graphics of distribution information. Provides tests for the Graphics.Distribution classes which provide the ability to create graphics to show the distribution of information about BACs/chromosomes/etc. """ # standard library import os import random import unittest from Bio import MissingExternalDependencyError try: import reportlab as r del r except: raise MissingExternalDependencyError( "Install reportlab if you want to use Bio.Graphics.") # local stuff from Bio.Graphics.Distribution import BarChartDistribution from Bio.Graphics.Distribution import LineDistribution from Bio.Graphics.Distribution import DistributionPage def random_distribution(min=-5.0, max=5.0, total_items=50): """Create a series of random distribution information. """ num_items = random.randrange(5, total_items) all_info = [] for item in range(num_items): new_item = random.uniform(min, max) all_info.append(new_item) return all_info class BarChartTest(unittest.TestCase): """Test display of BarChart distributions on a page. """ def setUp(self): self.simple_page = os.path.join(os.getcwd(), "Graphics", "simple_bar.pdf") self.multi_page = os.path.join(os.getcwd(), "Graphics", "multi_bar.pdf") self.num_multi = 5 def test_simple_page(self): """Test displaying a page with single distribution. """ dist_info = [] new_info = random_distribution() dist_info.append(new_info) distribution = BarChartDistribution(dist_info) dist_page = DistributionPage() dist_page.distributions.append(distribution) dist_page.draw(self.simple_page, "Test Bar Chart") def test_multi_page(self): """Create a page with multiple distributions on it. """ dist_page = DistributionPage() dist_page.number_of_columns = 3 for multi in range(self.num_multi): dist_info = [] new_info = random_distribution() dist_info.append(new_info) distribution = BarChartDistribution(dist_info) distribution.chart_title = "Distribution %s" % (multi + 1) dist_page.distributions.append(distribution) dist_page.draw(self.multi_page, "Test Multi Bar Chart") if __name__ == "__main__": runner = unittest.TextTestRunner(verbosity=2) unittest.main(testRunner=runner)
updownlife/multipleK
dependencies/biopython-1.65/Tests/test_GraphicsDistribution.py
Python
gpl-2.0
2,691
[ "Biopython" ]
4fe7c0aed78ad9ea776438a70a769a16d79b528b833c8eb2c6b6be53eacc2813
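Stripped of the unittest scaffolding, the calls exercised by the test above boil down to the
following sketch (output file name and values are illustrative; reportlab must be installed, as the
test's dependency check indicates).

import random
from Bio.Graphics.Distribution import BarChartDistribution, DistributionPage

# One series of random values, as random_distribution() produces in the test
values = [random.uniform(-5.0, 5.0) for _ in range(30)]

distribution = BarChartDistribution([values])
distribution.chart_title = "Example distribution"

page = DistributionPage()
page.distributions.append(distribution)
page.draw("example_bar.pdf", "Example Bar Chart")  # writes the PDF to disk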
# This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. from __future__ import absolute_import import os import click import cookiecutter.main import please_cli.config import please_cli.utils CMD_HELP = ''' Create a new PROJECT from a TEMPLATE \b TEMPLATES: {templates} '''.format( templates=''.join([' - ' + i + '\n' for i in please_cli.config.TEMPLATES]), ) @click.command( cls=please_cli.utils.ClickCustomCommand, short_help="Create PROJECT initial structure.", epilog="Happy hacking!", help=CMD_HELP, ) @click.argument( 'template', required=True, type=click.Choice(please_cli.config.TEMPLATES), ) @click.argument( 'project', required=True, type=str, ) @click.pass_context def cmd(ctx, template, project): """ """ template_dir = os.path.join(please_cli.config.ROOT_DIR, 'nix', 'templates', template) if not os.path.isdir(template_dir): raise project_dir = os.path.join(please_cli.config.SRC_DIR, project) if os.path.isdir(project_dir): raise template_options = dict(project=project) template_options['project_path'] = project.replace('-', '_') template_options['project_url'] = 'TODO' if project.startswith('releng-'): template_options['project_url'] = '{}.mozilla-releng.net'.format(project[len('releng-'):]) if project.startswith('shipit-'): template_options['project_url'] = '{}.shipit.mozilla-releng.net'.format(project[len('shipit-'):]) click.echo('=> Creating project structure ...') cookiecutter.main.cookiecutter( template_dir, no_input=True, extra_context=template_options, output_dir=please_cli.config.SRC_DIR, ) click.secho('\nProject `{}` created sucessfully!'.format(project), fg='green', bold=True) click.echo('\nCode is located at:') click.echo(' src/{}'.format(project)) click.echo('\nTo enter development environemnt run:') click.echo(' ./please shell {}'.format(project)) click.echo('\nTo read more about `{}` template visit:'.format(template)) click.echo(' https://docs.mozilla-releng.net/develop/template-{}.html'.format(template)) click.echo('')
lundjordan/services
lib/please_cli/please_cli/create.py
Python
mpl-2.0
2,344
[ "VisIt" ]
dc408487d4a2d1a5d296d55ea47778492daacee8644386dacb6a263fb6cbdf6f
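The only non-trivial logic in the command above is how the cookiecutter context is derived from the
project name. A standalone sketch of that derivation (hypothetical function name, example project
name) could look like this:

def template_context(project):
    # Mirror the context built by cmd(): module path and public URL derived from the name
    context = {
        'project': project,
        'project_path': project.replace('-', '_'),
        'project_url': 'TODO',
    }
    if project.startswith('releng-'):
        context['project_url'] = '{}.mozilla-releng.net'.format(project[len('releng-'):])
    if project.startswith('shipit-'):
        context['project_url'] = '{}.shipit.mozilla-releng.net'.format(project[len('shipit-'):])
    return context

print(template_context('releng-tooltool'))
# expected roughly: {'project': 'releng-tooltool', 'project_path': 'releng_tooltool',
#                    'project_url': 'tooltool.mozilla-releng.net'}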
# Sample module in the public domain. Feel free to use this as a template # for your modules (and you can remove this header and take complete credit # and liability) # # Contact: Brian Carrier [carrier <at> sleuthkit [dot] org] # # This is free and unencumbered software released into the public domain. # # Anyone is free to copy, modify, publish, use, compile, sell, or # distribute this software, either in source code form or as a compiled # binary, for any purpose, commercial or non-commercial, and by any # means. # # In jurisdictions that recognize copyright laws, the author or authors # of this software dedicate any and all copyright interest in the # software to the public domain. We make this dedication for the benefit # of the public at large and to the detriment of our heirs and # successors. We intend this dedication to be an overt act of # relinquishment in perpetuity of all present and future rights to this # software under copyright law. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, # ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR # OTHER DEALINGS IN THE SOFTWARE. # Ingest module for Autopsy with GUI # # Difference between other modules in this folder is that it has a GUI # for user options. This is not needed for very basic modules. If you # don't need a configuration UI, start with the other sample module. # # Search for TODO for the things that you need to change # See http://sleuthkit.org/autopsy/docs/api-docs/latest/index.html for documentation import jarray import inspect from java.lang import System from java.util.logging import Level from javax.swing import JCheckBox from javax.swing import BoxLayout from org.sleuthkit.autopsy.casemodule import Case from org.sleuthkit.autopsy.casemodule.services import Services from org.sleuthkit.autopsy.ingest import DataSourceIngestModule from org.sleuthkit.autopsy.ingest import FileIngestModule from org.sleuthkit.autopsy.ingest import GenericIngestModuleJobSettings from org.sleuthkit.autopsy.ingest import IngestMessage from org.sleuthkit.autopsy.ingest import IngestModule from org.sleuthkit.autopsy.ingest.IngestModule import IngestModuleException from org.sleuthkit.autopsy.ingest import IngestModuleFactoryAdapter from org.sleuthkit.autopsy.ingest import IngestModuleIngestJobSettings from org.sleuthkit.autopsy.ingest import IngestModuleIngestJobSettingsPanel from org.sleuthkit.autopsy.ingest import IngestServices from org.sleuthkit.autopsy.ingest import IngestModuleGlobalSettingsPanel from org.sleuthkit.datamodel import BlackboardArtifact from org.sleuthkit.datamodel import BlackboardAttribute from org.sleuthkit.datamodel import ReadContentInputStream from org.sleuthkit.autopsy.coreutils import Logger from java.lang import IllegalArgumentException # TODO: Rename this to something more specific class SampleFileIngestModuleWithUIFactory(IngestModuleFactoryAdapter): def __init__(self): self.settings = None # TODO: give it a unique name. Will be shown in module list, logs, etc. moduleName = "Sample Data Source Module with UI" def getModuleDisplayName(self): return self.moduleName # TODO: Give it a description def getModuleDescription(self): return "Sample module that does X, Y, and Z." 
def getModuleVersionNumber(self): return "1.0" # TODO: Update class name to one that you create below def getDefaultIngestJobSettings(self): return GenericIngestModuleJobSettings() # TODO: Keep enabled only if you need ingest job-specific settings UI def hasIngestJobSettingsPanel(self): return True # TODO: Update class names to ones that you create below # Note that you must use GenericIngestModuleJobSettings instead of making a custom settings class. def getIngestJobSettingsPanel(self, settings): if not isinstance(settings, GenericIngestModuleJobSettings): raise IllegalArgumentException("Expected settings argument to be instanceof GenericIngestModuleJobSettings") self.settings = settings return SampleFileIngestModuleWithUISettingsPanel(self.settings) def isFileIngestModuleFactory(self): return True # TODO: Update class name to one that you create below def createFileIngestModule(self, ingestOptions): return SampleFileIngestModuleWithUI(self.settings) # File-level ingest module. One gets created per thread. # TODO: Rename this to something more specific. Could just remove "Factory" from above name. # Looks at the attributes of the passed in file. class SampleFileIngestModuleWithUI(FileIngestModule): _logger = Logger.getLogger(SampleFileIngestModuleWithUIFactory.moduleName) def log(self, level, msg): self._logger.logp(level, self.__class__.__name__, inspect.stack()[1][3], msg) # Autopsy will pass in the settings from the UI panel def __init__(self, settings): self.local_settings = settings # Where any setup and configuration is done # TODO: Add any setup code that you need here. def startUp(self, context): # As an example, determine if user configured a flag in UI if self.local_settings.getSetting("flag") == "true": self.log(Level.INFO, "flag is set") else: self.log(Level.INFO, "flag is not set") # Throw an IngestModule.IngestModuleException exception if there was a problem setting up # raise IngestModuleException("Oh No!") pass # Where the analysis is done. Each file will be passed into here. # TODO: Add your analysis code in here. def process(self, file): # See code in pythonExamples/fileIngestModule.py for example code return IngestModule.ProcessResult.OK # Where any shutdown code is run and resources are freed. # TODO: Add any shutdown code that you need here. def shutDown(self): pass # UI that is shown to user for each ingest job so they can configure the job. # TODO: Rename this class SampleFileIngestModuleWithUISettingsPanel(IngestModuleIngestJobSettingsPanel): # Note, we can't use a self.settings instance variable. # Rather, self.local_settings is used. # https://wiki.python.org/jython/UserGuide#javabean-properties # Jython Introspector generates a property - 'settings' on the basis # of getSettings() defined in this class. Since only getter function # is present, it creates a read-only 'settings' property. 
This auto- # generated read-only property overshadows the instance-variable - # 'settings' # We get passed in a previous version of the settings so that we can # prepopulate the UI # TODO: Update this for your UI def __init__(self, settings): self.local_settings = settings self.initComponents() self.customizeComponents() # TODO: Update this for your UI def checkBoxEvent(self, event): if self.checkbox.isSelected(): self.local_settings.setSetting("flag", "true") else: self.local_settings.setSetting("flag", "false") # TODO: Update this for your UI def initComponents(self): self.setLayout(BoxLayout(self, BoxLayout.Y_AXIS)) self.checkbox = JCheckBox("Flag", actionPerformed=self.checkBoxEvent) self.add(self.checkbox) # TODO: Update this for your UI def customizeComponents(self): self.checkbox.setSelected(self.local_settings.getSetting("flag") == "true") # Return the settings used def getSettings(self): return self.local_settings
esaunders/autopsy
pythonExamples/fileIngestModuleWithGui.py
Python
apache-2.0
7,878
[ "Brian" ]
af408653d56a12c239750b885b6e74d60125a1aef454140b6f18f983acabc9eb
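The module above passes configuration from the settings panel to the ingest module through
GenericIngestModuleJobSettings. The fragment below (only meaningful inside Autopsy's Jython
environment, not standalone Python) shows the same setSetting/getSetting round trip that
checkBoxEvent and startUp rely on.

from org.sleuthkit.autopsy.ingest import GenericIngestModuleJobSettings

settings = GenericIngestModuleJobSettings()
settings.setSetting("flag", "true")   # what checkBoxEvent stores when the box is ticked
print(settings.getSetting("flag"))    # startUp reads this back and logs accordingly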
# coding: utf8 # Copyright 2014-2017 CERN. This software is distributed under the # terms of the GNU General Public Licence version 3 (GPL Version 3), # copied verbatim in the file LICENCE.md. # In applying this licence, CERN does not waive the privileges and immunities # granted to it by virtue of its status as an Intergovernmental Organization or # submit itself to any jurisdiction. # Project website: http://blond.web.cern.ch/ """ Unittest for llrf.cavity_feedback :Authors: **Birk Emil Karlsen-Baeck**, **Helga Timko** """ import unittest import numpy as np import matplotlib.pyplot as plt from blond.llrf.cavity_feedback import SPSOneTurnFeedback, CavityFeedbackCommissioning from blond.beam.beam import Beam, Proton from blond.beam.profile import Profile, CutOptions from blond.input_parameters.rf_parameters import RFStation from blond.input_parameters.ring import Ring class TestCavityFeedback(unittest.TestCase): def setUp(self): C = 2*np.pi*1100.009 # Ring circumference [m] gamma_t = 18.0 # Gamma at transition alpha = 1/gamma_t**2 # Momentum compaction factor p_s = 25.92e9 # Synchronous momentum at injection [eV] h = 4620 # 200 MHz system harmonic phi = 0. # 200 MHz RF phase # With this setting, amplitude in the two four-section, five-section # cavities must converge, respectively, to # 2.0 MV = 4.5 MV * 4/18 * 2 # 2.5 MV = 4.5 MV * 5/18 * 2 V = 4.5e6 # 200 MHz RF voltage N_t = 1 # Number of turns to track self.ring = Ring(C, alpha, p_s, Particle=Proton(), n_turns=N_t) self.rf = RFStation(self.ring, h, V, phi) N_m = 1e6 # Number of macro-particles for tracking N_b = 72*1.0e11 # Bunch intensity [ppb] # Gaussian beam profile self.beam = Beam(self.ring, N_m, N_b) sigma = 1.0e-9 bigaussian(self.ring, self.rf, self.beam, sigma, seed=1234, reinsertion=False) n_shift = 1550 # how many rf-buckets to shift beam self.beam.dt += n_shift * self.rf.t_rf[0, 0] self.profile = Profile( self.beam, CutOptions=CutOptions( cut_left=(n_shift-1.5)*self.rf.t_rf[0, 0], cut_right=(n_shift+2.5)*self.rf.t_rf[0, 0], n_slices=4*64)) self.profile.track() # Cavities l_cav = 43*0.374 v_g = 0.0946 tau = l_cav/(v_g*c)*(1 + v_g) f_cav = 200.222e6 n_cav = 2 # factor 2 because of two four/five-sections cavities short_cavity = TravelingWaveCavity(l_cav**2 * n_cav * 27.1e3 / 8, f_cav, 2*np.pi*tau) shortInducedVoltage = InducedVoltageTime(self.beam, self.profile, [short_cavity]) l_cav = 54*0.374 tau = l_cav/(v_g*c)*(1 + v_g) long_cavity = TravelingWaveCavity(l_cav**2 * n_cav * 27.1e3 / 8, f_cav, 2*np.pi*tau) longInducedVoltage = InducedVoltageTime(self.beam, self.profile, [long_cavity]) self.induced_voltage = TotalInducedVoltage( self.beam, self.profile, [shortInducedVoltage, longInducedVoltage]) self.induced_voltage.induced_voltage_sum() self.cavity_tracker = RingAndRFTracker( self.rf, self.beam, Profile=self.profile, interpolation=True, TotalInducedVoltage=self.induced_voltage) self.OTFB = SPSCavityFeedback( self.rf, self.beam, self.profile, G_llrf=5, G_tx=0.5, a_comb=15/16, turns=50, post_LS2=False, Commissioning=CavityFeedbackCommissioning(open_FF=True)) self.OTFB_tracker = RingAndRFTracker(self.rf, self.beam, Profile=self.profile, TotalInducedVoltage=None, CavityFeedback=self.OTFB, interpolation=True) def test_FB_pre_tracking(self): digit_round = 3 Vind4_mean = np.mean(np.absolute(self.OTFB.OTFB_1.V_coarse_tot))/1e6 Vind4_std = np.std(np.absolute(self.OTFB.OTFB_1.V_coarse_tot))/1e6 Vind4_mean_exp = 1.99886351363 Vind4_std_exp = 2.148426e-6 Vind5_mean = np.mean(np.absolute(self.OTFB.OTFB_2.V_coarse_tot))/1e6 Vind5_std = 
np.std(np.absolute(self.OTFB.OTFB_2.V_coarse_tot))/1e6 Vind5_mean_exp = 2.49906605189 Vind5_std_exp = 2.221665e-6 self.assertAlmostEqual(Vind4_mean, Vind4_mean_exp, places=digit_round, msg='In TestCavityFeedback test_FB_pretracking: ' + 'mean value of four-section cavity differs') self.assertAlmostEqual(Vind4_std, Vind4_std_exp, places=digit_round, msg='In TestCavityFeedback test_FB_pretracking: standard ' + 'deviation of four-section cavity differs') self.assertAlmostEqual(Vind5_mean, Vind5_mean_exp, places=digit_round, msg='In TestCavityFeedback test_FB_pretracking: ' + 'mean value of five-section cavity differs') self.assertAlmostEqual(Vind5_std, Vind5_std_exp, places=digit_round, msg='In TestCavityFeedback test_FB_pretracking: standard ' + 'deviation of five-section cavity differs') def test_FB_pre_tracking_IQ_v1(self): rtol = 1e-3 # relative tolerance atol = 0 # absolute tolerance # interpolate from coarse mesh to fine mesh V_fine_tot_4 = np.interp( self.profile.bin_centers, self.OTFB.OTFB_1.rf_centers, self.OTFB.OTFB_1.V_coarse_ind_gen) V_fine_tot_5 = np.interp( self.profile.bin_centers, self.OTFB.OTFB_2.rf_centers, self.OTFB.OTFB_2.V_coarse_ind_gen) V_tot_4 = V_fine_tot_4/1e6 V_tot_5 = V_fine_tot_5/1e6 V_sum = self.OTFB.V_sum/1e6 # expected generator voltage is only in Q V_tot_4_exp = 2.0j*np.ones(256) V_tot_5_exp = 2.5j*np.ones(256) V_sum_exp = 4.5j*np.ones(256) np.testing.assert_allclose(V_tot_4, V_tot_4_exp, rtol=rtol, atol=atol, err_msg='In TestCavityFeedback test_FB_pretracking_IQ: total voltage ' + 'in four-section cavity differs') np.testing.assert_allclose(V_tot_5, V_tot_5_exp, rtol=rtol, atol=atol, err_msg='In TestCavityFeedback test_FB_pretracking_IQ: total voltage ' + 'in five-section cavity differs') np.testing.assert_allclose(V_sum, V_sum_exp, rtol=rtol, atol=atol, err_msg='In TestCavityFeedback test_FB_pretracking_IQ: voltage sum ' + ' differs') def test_rf_voltage(self): digit_round = 7 # compute voltage self.cavity_tracker.rf_voltage_calculation() # compute voltage after OTFB pre-tracking self.OTFB_tracker.rf_voltage_calculation() # Since there is a systematic offset between the voltages, # compare the maxium of the ratio max_ratio = np.max(self.cavity_tracker.rf_voltage / self.OTFB_tracker.rf_voltage) max_ratio = max_ratio max_ratio_exp = 1.0016540193319539 self.assertAlmostEqual(max_ratio, max_ratio_exp, places=digit_round, msg='In TestCavityFeedback test_rf_voltage: ' + 'RF-voltages differ') def test_beam_loading(self): digit_round = 7 # Compute voltage with beam loading self.cavity_tracker.rf_voltage_calculation() cavity_tracker_total_voltage = self.cavity_tracker.rf_voltage \ + self.cavity_tracker.totalInducedVoltage.induced_voltage self.OTFB.track() self.OTFB_tracker.rf_voltage_calculation() OTFB_tracker_total_voltage = self.OTFB_tracker.rf_voltage max_ratio = np.max(cavity_tracker_total_voltage / OTFB_tracker_total_voltage) max_ration_exp = 1.0055233047525063 self.assertAlmostEqual(max_ratio, max_ration_exp, places=digit_round, msg='In TestCavityFeedback test_beam_loading: ' + 'total voltages differ') def test_Vsum_IQ(self): rtol = 1e-7 # relative tolerance atol = 0 # absolute tolerance self.OTFB.track() V_sum = self.OTFB.V_sum/1e6 V_sum_exp = np.array([2.5827763187e-04+4.497826566j , 2.5827763187e-04+4.497826566j , 2.5827763187e-04+4.497826566j , 2.5827763187e-04+4.497826566j , 2.5827763187e-04+4.497826566j , 2.5827763187e-04+4.497826566j , 2.5827763187e-04+4.497826566j , 2.5827763187e-04+4.497826566j , 2.5827763187e-04+4.497826566j , 
2.5827763187e-04+4.497826566j , 2.5827763187e-04+4.497826566j , 2.5827763186e-04+4.497826566j , 2.5827763186e-04+4.497826566j , 2.5827763186e-04+4.497826566j , 2.5827763186e-04+4.497826566j , 2.5827763186e-04+4.497826566j , 2.5827763186e-04+4.497826566j , 2.5827763187e-04+4.497826566j , 2.5827763186e-04+4.497826566j , 2.5827763187e-04+4.497826566j , 2.5827763186e-04+4.497826566j , 2.5827763186e-04+4.497826566j , 2.5827763186e-04+4.497826566j , 2.5827763186e-04+4.497826566j , 2.5827763186e-04+4.497826566j , 2.5827763186e-04+4.497826566j , 2.5827763186e-04+4.497826566j , 2.5827763186e-04+4.497826566j , 2.5827763186e-04+4.497826566j , 2.5827763186e-04+4.497826566j , 2.5827763186e-04+4.497826566j , 2.5827763186e-04+4.497826566j , 2.5827763186e-04+4.497826566j , 2.5827763186e-04+4.497826566j , 2.5827763186e-04+4.497826566j , 2.5827763186e-04+4.497826566j , 2.5827763186e-04+4.497826566j , 2.5827763186e-04+4.497826566j , 2.5827763186e-04+4.497826566j , 2.5827763186e-04+4.497826566j , 2.5827763186e-04+4.497826566j , 2.5827763186e-04+4.497826566j , 2.5827763186e-04+4.497826566j , 2.5827763186e-04+4.497826566j , 2.5827763186e-04+4.497826566j , 2.5827763186e-04+4.497826566j , 2.5827763186e-04+4.497826566j , 2.5827763185e-04+4.497826566j , 2.5827763185e-04+4.497826566j , 2.5827763186e-04+4.497826566j , 2.5827763185e-04+4.497826566j , 2.5827763185e-04+4.497826566j , 2.5827763185e-04+4.497826566j , 2.5827763185e-04+4.497826566j , 2.5827763186e-04+4.497826566j , 2.5827763185e-04+4.497826566j , 2.5827763185e-04+4.497826566j , 2.5827763185e-04+4.497826566j , 2.5827763185e-04+4.497826566j , 2.5827763185e-04+4.497826566j , 2.5827763185e-04+4.497826566j , 2.5827763185e-04+4.497826566j , 2.5827763185e-04+4.497826566j , 2.5827763185e-04+4.497826566j , 2.5827763185e-04+4.497826566j , 2.5827763185e-04+4.497826566j , 2.5827763185e-04+4.497826566j , 2.7211082903e-04+4.4978315156j, 3.1250362304e-04+4.4978490279j, 3.5165856346e-04+4.4978691422j, 3.7604825623e-04+4.497885444j , 4.0960534138e-04+4.4979139243j, 4.4122258209e-04+4.4979445351j, 4.6856908981e-04+4.4979790123j, 5.1625702452e-04+4.4980530059j, 5.9040367398e-04+4.4981963582j, 6.7887023817e-04+4.4984137831j, 7.5904893974e-04+4.4986807294j, 8.2700402474e-04+4.4990411418j, 8.7582505677e-04+4.4995964993j, 8.6994404643e-04+4.5003886916j, 7.5045903432e-04+4.5014972574j, 4.3912456953e-04+4.5030058729j, -1.8600403500e-04+4.5050084589j, -1.1622444406e-03+4.5073509979j, -2.6792961680e-03+4.510132916j , -4.9961691195e-03+4.5135600289j, -8.4133537603e-03+4.5176631561j, -1.3338780932e-02+4.5225295511j, -2.0294488240e-02+4.5281591475j, -3.0386532228e-02+4.534786012j , -4.3938181865e-02+4.5419516034j, -6.1132819357e-02+4.5489680266j, -8.1997700093e-02+4.555224796j , -1.0908920882e-01+4.5603819179j, -1.4355354441e-01+4.563641761j , -1.8547605734e-01+4.5633974273j, -2.3587594114e-01+4.5582228127j, -2.9450459410e-01+4.5462841072j, -3.6298551422e-01+4.5251466487j, -4.4144673453e-01+4.4922598897j, -5.2864469287e-01+4.4452108361j, -6.2409501545e-01+4.3808677563j, -7.2823812612e-01+4.2946179162j, -8.4108073134e-01+4.1807386917j, -9.5833446725e-01+4.0368637895j, -1.0745640376e+00+3.8616018131j, -1.1865884525e+00+3.6501645292j, -1.2883489836e+00+3.4025749042j, -1.3747012172e+00+3.1138816921j, -1.4401444574e+00+2.779426695j , -1.4761084477e+00+2.4028289162j, -1.4748944532e+00+1.9850557907j, -1.4286473441e+00+1.5284378922j, -1.3289802158e+00+1.0348188495j, -1.1687970402e+00+0.5112753421j, -9.4301279117e-01-0.0303928583j, -6.4421157600e-01-0.5859581106j, 
-2.6756604191e-01-1.1470275079j, 1.8797976613e-01-1.6997257125j, 7.1991811737e-01-2.229842275j , 1.3268637215e+00-2.7259522866j, 2.0072428849e+00-3.1788356601j, 2.7576198052e+00-3.5780623532j, 3.5626421776e+00-3.910426641j , 4.4044185457e+00-4.164562411j , 5.2860593337e+00-4.3382136891j, 6.1975280356e+00-4.4267736087j, 7.1131024791e+00-4.4257941618j, 8.0172589410e+00-4.3357168878j, 8.8979905908e+00-4.1595593025j, 9.7471694742e+00-3.9007504938j, 1.0556332353e+01-3.5644671627j, 1.1302204198e+01-3.1653817869j, 1.1978432945e+01-2.7122544408j, 1.2583653215e+01-2.214881745j , 1.3111542557e+01-1.6857213733j, 1.3564375350e+01-1.1327477406j, 1.3937488096e+01-0.5734824569j, 1.4232635792e+01-0.0198698203j, 1.4456282704e+01+0.5218254956j, 1.4613743064e+01+1.0432039982j, 1.4710869799e+01+1.5341295769j, 1.4755258991e+01+1.9940972717j, 1.4754875217e+01+2.4166162655j, 1.4717028262e+01+2.7942558651j, 1.4649727975e+01+3.1297934963j, 1.4559635450e+01+3.4244797961j, 1.4454306808e+01+3.6777300792j, 1.4340466575e+01+3.8896579027j, 1.4222822357e+01+4.0649958801j, 1.4106706208e+01+4.2059792929j, 1.3994943602e+01+4.3172579991j, 1.3888950886e+01+4.4038968093j, 1.3789806400e+01+4.4698748453j, 1.3699260758e+01+4.5182083265j, 1.3619464079e+01+4.5513193917j, 1.3549894368e+01+4.5725377973j, 1.3490642575e+01+4.5846114651j, 1.3440962949e+01+4.5898280233j, 1.3398020086e+01+4.5902344013j, 1.3361834669e+01+4.587269337j , 1.3333035076e+01+4.5822676661j, 1.3309945479e+01+4.5760843538j, 1.3291507676e+01+4.5694497063j, 1.3277184880e+01+4.5629416638j, 1.3266021743e+01+4.5568244337j, 1.3257425068e+01+4.5513230024j, 1.3250804652e+01+4.5465163095j, 1.3245657754e+01+4.5423680337j, 1.3241804048e+01+4.5391726907j, 1.3238935683e+01+4.5368872108j, 1.3236534668e+01+4.534963213j , 1.3234483934e+01+4.5334110108j, 1.3232694545e+01+4.5321552208j, 1.3231076317e+01+4.5311697743j, 1.3229581752e+01+4.530646752j , 1.3228131824e+01+4.5305065364j, 1.3226714601e+01+4.5304598301j, 1.3225322516e+01+4.5304194047j, 1.3223940546e+01+4.5304446878j, 1.3222581996e+01+4.530481022j , 1.3221175647e+01+4.5306250559j, 1.3219721045e+01+4.530860676j , 1.3218268568e+01+4.5310981753j, 1.3216831753e+01+4.5313311328j, 1.3215384039e+01+4.5315739034j, 1.3213900918e+01+4.5318428696j, 1.3212417791e+01+4.5321117752j, 1.3210934659e+01+4.5323806201j, 1.3209451521e+01+4.5326494045j, 1.3207968378e+01+4.5329181282j, 1.3206499904e+01+4.5331875122j, 1.3205031421e+01+4.533456836j , 1.3203548258e+01+4.5337253784j, 1.3202065089e+01+4.5339938601j, 1.3200581916e+01+4.5342622812j, 1.3199098736e+01+4.5345306417j, 1.3197615552e+01+4.5347989416j, 1.3196132362e+01+4.5350671808j, 1.3194649166e+01+4.5353353595j, 1.3193165965e+01+4.5356034775j, 1.3191682758e+01+4.5358715349j, 1.3190199546e+01+4.5361395317j, 1.3188716329e+01+4.5364074679j, 1.3187233106e+01+4.5366753435j, 1.3185749878e+01+4.5369431585j, 1.3184266644e+01+4.5372109128j, 1.3182783405e+01+4.5374786066j, 1.3181300161e+01+4.5377462397j, 1.3179816911e+01+4.5380138122j, 1.3178333655e+01+4.5382813241j, 1.3176850394e+01+4.5385487754j, 1.3175367128e+01+4.538816166j , 1.3173883856e+01+4.5390834961j, 1.3172400579e+01+4.5393507655j, 1.3170917296e+01+4.5396179743j, 1.3169434008e+01+4.5398851225j, 1.3167950715e+01+4.5401522101j, 1.3166467416e+01+4.5404192371j, 1.3164984112e+01+4.5406862034j, 1.3163500802e+01+4.5409531092j, 1.3162017487e+01+4.5412199543j, 1.3160534167e+01+4.5414867388j, 1.3159050841e+01+4.5417534627j, 1.3157567510e+01+4.542020126j , 1.3156084173e+01+4.5422867287j, 1.3154600831e+01+4.5425532707j, 1.3153117483e+01+4.5428197521j, 
1.3151634130e+01+4.5430861729j, 1.3150150772e+01+4.5433525331j, 1.3148667409e+01+4.5436188327j, 1.3147184039e+01+4.5438850717j, 1.3145700665e+01+4.54415125j , 1.3144217285e+01+4.5444173677j, 1.3142733900e+01+4.5446834249j, 1.3141250509e+01+4.5449494213j, 1.3139767113e+01+4.5452153572j, 1.3138283712e+01+4.5454812325j, 1.3136800305e+01+4.5457470471j, 1.3135316893e+01+4.5460128011j, 1.3133833476e+01+4.5462784945j, 1.3132350053e+01+4.5465441273j, 1.3130866624e+01+4.5468096995j, 1.3129383191e+01+4.5470752111j, 1.3127899752e+01+4.547340662j , 1.3126416308e+01+4.5476060523j, 1.3124932858e+01+4.547871382j , 1.3123449403e+01+4.5481366511j, 1.3121965942e+01+4.5484018595j, 1.3120482476e+01+4.5486670074j, 1.3118999005e+01+4.5489320946j, 1.3117515529e+01+4.5491971212j, 1.3116032047e+01+4.5494620872j, 1.3114548560e+01+4.5497269925j, 1.3113065067e+01+4.5499918373j]) np.testing.assert_allclose(V_sum_exp, V_sum, rtol=rtol, atol=atol, err_msg='In TestCavityFeedback test_Vsum_IQ: total voltage ' + 'is different from expected values!') class TestSPSCavityFeedback(unittest.TestCase): def setUp(self): # Parameters ---------------------------------------------------------- C = 2 * np.pi * 1100.009 # Ring circumference [m] gamma_t = 18.0 # Transition Gamma [-] alpha = 1 / (gamma_t ** 2) # Momentum compaction factor [-] p_s = 450e9 # Synchronous momentum [eV] h = 4620 # 200 MHz harmonic number [-] V = 10e6 # 200 MHz RF voltage [V] phi = 0 # 200 MHz phase [-] # Parameters for the Simulation N_m = 1e5 # Number of macro-particles for tracking N_b = 1.0e11 # Bunch intensity [ppb] N_t = 1 # Number of turns to track # Objects ------------------------------------------------------------- # Ring self.ring = Ring(C, alpha, p_s, Proton(), N_t) # RFStation self.rfstation = RFStation(self.ring, [h], [V], [phi], n_rf=1) # Beam self.beam = Beam(self.ring, N_m, N_b) self.profile = Profile(self.beam, CutOptions=CutOptions(cut_left=0.e-9, cut_right=self.rfstation.t_rev[0], n_slices=4620)) self.profile.track() # Cavity self.Commissioning = CavityFeedbackCommissioning() self.OTFB_new = SPSOneTurnFeedback(self.rfstation, self.beam, self.profile, 3, a_comb=63 / 64, Commissioning=self.Commissioning) self.OTFB_new.update_variables() self.turn_array = np.linspace(0, 2 * self.rfstation.t_rev[0], 2 * self.OTFB_new.n_coarse) def test_set_point(self): self.OTFB_new.set_point() t_sig = np.zeros(2 * self.OTFB_new.n_coarse, dtype=complex) t_sig[-self.OTFB_new.n_coarse:] = (4/9) * 10e6 * np.exp(1j * (np.pi/2 - self.rfstation.phi_rf[0,0])) np.testing.assert_allclose(self.OTFB_new.V_SET, t_sig) def test_error_and_gain(self): self.OTFB_new.error_and_gain() np.testing.assert_allclose(self.OTFB_new.DV_GEN, self.OTFB_new.V_SET * self.OTFB_new.G_llrf) def test_comb(self): sig = np.zeros(self.OTFB_new.n_coarse) self.OTFB_new.DV_COMB_OUT = np.sin(2 * np.pi * self.turn_array / self.rfstation.t_rev[0]) self.OTFB_new.DV_GEN = -np.sin(2 * np.pi * self.turn_array / self.rfstation.t_rev[0]) self.OTFB_new.a_comb = 0.5 self.OTFB_new.comb() np.testing.assert_allclose(self.OTFB_new.DV_COMB_OUT[-self.OTFB_new.n_coarse:], sig) def test_one_turn_delay(self): self.OTFB_new.DV_COMB_OUT = np.zeros(2 * self.OTFB_new.n_coarse, dtype=complex) self.OTFB_new.DV_COMB_OUT[self.OTFB_new.n_coarse] = 1 self.OTFB_new.one_turn_delay() self.assertEqual(np.argmax(self.OTFB_new.DV_DELAYED), 2 * self.OTFB_new.n_coarse - self.OTFB_new.n_mov_av) def test_mod_to_fr(self): self.OTFB_new.DV_DELAYED = np.zeros(2 * self.OTFB_new.n_coarse, dtype=complex) 
self.OTFB_new.DV_DELAYED[-self.OTFB_new.n_coarse:] = 1 + 1j * 0 self.mod_phi = np.copy(self.OTFB_new.dphi_mod) self.OTFB_new.mod_to_fr() ref_DV_MOD_FR = np.load("ref_DV_MOD_FR.npy") np.testing.assert_allclose(self.OTFB_new.DV_MOD_FR[-self.OTFB_new.n_coarse:], ref_DV_MOD_FR) self.OTFB_new.DV_DELAYED = np.zeros(2 * self.OTFB_new.n_coarse, dtype=complex) self.OTFB_new.DV_DELAYED[-self.OTFB_new.n_coarse:] = 1 + 1j * 0 self.OTFB_new.dphi_mod = 0 self.OTFB_new.mod_to_fr() time_array = self.OTFB_new.rf_centers - 0.5*self.OTFB_new.T_s ref_sig = np.cos((self.OTFB_new.omega_c - self.OTFB_new.omega_r) * time_array[:self.OTFB_new.n_coarse]) + \ 1j * np.sin((self.OTFB_new.omega_c - self.OTFB_new.omega_r) * time_array[:self.OTFB_new.n_coarse]) np.testing.assert_allclose(self.OTFB_new.DV_MOD_FR[-self.OTFB_new.n_coarse:], ref_sig) self.OTFB_new.dphi_mod = self.mod_phi def test_mov_avg(self): sig = np.zeros(self.OTFB_new.n_coarse-1) sig[:self.OTFB_new.n_mov_av] = 1 self.OTFB_new.DV_MOD_FR = np.zeros(2 * self.OTFB_new.n_coarse) self.OTFB_new.DV_MOD_FR[-self.OTFB_new.n_coarse + 1:] = sig self.OTFB_new.mov_avg() sig = np.zeros(self.OTFB_new.n_coarse) sig[:self.OTFB_new.n_mov_av] = (1/self.OTFB_new.n_mov_av) * np.array(range(self.OTFB_new.n_mov_av)) sig[self.OTFB_new.n_mov_av: 2 * self.OTFB_new.n_mov_av] = (1/self.OTFB_new.n_mov_av) * (self.OTFB_new.n_mov_av - np.array(range(self.OTFB_new.n_mov_av))) np.testing.assert_allclose(np.abs(self.OTFB_new.DV_MOV_AVG[-self.OTFB_new.n_coarse:]), sig) def test_mod_to_frf(self): self.OTFB_new.DV_MOV_AVG = np.zeros(2 * self.OTFB_new.n_coarse, dtype=complex) self.OTFB_new.DV_MOV_AVG[-self.OTFB_new.n_coarse:] = 1 + 1j * 0 self.mod_phi = np.copy(self.OTFB_new.dphi_mod) self.OTFB_new.mod_to_frf() ref_DV_MOD_FRF = np.load("ref_DV_MOD_FRF.npy") np.testing.assert_allclose(self.OTFB_new.DV_MOD_FRF[-self.OTFB_new.n_coarse:], ref_DV_MOD_FRF) self.OTFB_new.DV_MOV_AVG = np.zeros(2 * self.OTFB_new.n_coarse, dtype=complex) self.OTFB_new.DV_MOV_AVG[-self.OTFB_new.n_coarse:] = 1 + 1j * 0 self.OTFB_new.dphi_mod = 0 self.OTFB_new.mod_to_frf() time_array = self.OTFB_new.rf_centers - 0.5*self.OTFB_new.T_s ref_sig = np.cos(-(self.OTFB_new.omega_c - self.OTFB_new.omega_r) * time_array[:self.OTFB_new.n_coarse]) + \ 1j * np.sin(-(self.OTFB_new.omega_c - self.OTFB_new.omega_r) * time_array[:self.OTFB_new.n_coarse]) np.testing.assert_allclose(self.OTFB_new.DV_MOD_FRF[-self.OTFB_new.n_coarse:], ref_sig) self.OTFB_new.dphi_mod = self.mod_phi def test_sum_and_gain(self): self.OTFB_new.V_SET[-self.OTFB_new.n_coarse:] = np.ones(self.OTFB_new.n_coarse, dtype=complex) self.OTFB_new.DV_MOD_FRF[-self.OTFB_new.n_coarse:] = np.ones(self.OTFB_new.n_coarse, dtype=complex) self.OTFB_new.sum_and_gain() sig = 2 * np.ones(self.OTFB_new.n_coarse) * self.OTFB_new.G_tx * self.OTFB_new.T_s / self.OTFB_new.TWC.R_gen np.testing.assert_allclose(self.OTFB_new.I_GEN[-self.OTFB_new.n_coarse:], sig) def test_gen_response(self): # Tests generator response at resonant frequency. 
self.OTFB_new.I_GEN = np.zeros(2 * self.OTFB_new.n_coarse, dtype=complex) self.OTFB_new.I_GEN[self.OTFB_new.n_coarse] = 1 self.OTFB_new.TWC.impulse_response_gen(self.OTFB_new.TWC.omega_r, self.OTFB_new.rf_centers) self.OTFB_new.gen_response() sig = np.zeros(self.OTFB_new.n_coarse) sig[1:1 + self.OTFB_new.n_mov_av] = 2 * self.OTFB_new.TWC.R_gen / self.OTFB_new.TWC.tau sig[0] = self.OTFB_new.TWC.R_gen / self.OTFB_new.TWC.tau sig[self.OTFB_new.n_mov_av + 1] = self.OTFB_new.TWC.R_gen / self.OTFB_new.TWC.tau np.testing.assert_allclose(np.abs(self.OTFB_new.V_IND_COARSE_GEN[-self.OTFB_new.n_coarse:]), sig, atol=1e-5) # Tests generator response at carrier frequency. self.OTFB_new.TWC.impulse_response_gen(self.OTFB_new.omega_c, self.OTFB_new.rf_centers) self.OTFB_new.I_GEN = np.zeros(2 * self.OTFB_new.n_coarse, dtype=complex) self.OTFB_new.I_GEN[self.OTFB_new.n_coarse] = 1 self.OTFB_new.gen_response() ref_V_IND_COARSE_GEN = np.load("ref_V_IND_COARSE_GEN.npy") np.testing.assert_allclose(self.OTFB_new.V_IND_COARSE_GEN[-self.OTFB_new.n_coarse:], ref_V_IND_COARSE_GEN) if __name__ == '__main__': unittest.main()
blond-admin/BLonD
unittests/llrf/test_cavity_feedback.py
Python
gpl-3.0
27,352
[ "Gaussian" ]
82919d8f0acc27b60f881b6ffc65decc6492e98649aedd067cd1f6d4be4843f0
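The test_comb case in the file above feeds the filter a sine wave from the previous turn and its negative as the new correction, with a_comb = 0.5, and expects the result to vanish. The sketch below is a minimal illustration of a one-turn comb recursion consistent with that expectation; the comb_update helper and the update rule dV_out = a_comb * dV_out_prev_turn + (1 - a_comb) * dV_gen are assumptions for illustration, not the actual SPSOneTurnFeedback.comb() implementation.

import numpy as np

def comb_update(prev_turn_out, dv_gen, a_comb):
    # Assumed one-turn comb recursion: blend the previous turn's output
    # with the newly generated correction signal.
    return a_comb * prev_turn_out + (1.0 - a_comb) * dv_gen

# Same inputs as test_comb: a sine from the previous turn and its negative
# as the new correction; with a_comb = 0.5 they cancel exactly.
t = np.linspace(0.0, 1.0, 64, endpoint=False)
prev_out = np.sin(2.0 * np.pi * t)
dv_gen = -np.sin(2.0 * np.pi * t)
out = comb_update(prev_out, dv_gen, a_comb=0.5)
np.testing.assert_allclose(out, np.zeros_like(t), atol=1e-12)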
"""Classes related to preparing members for the next calibration iteration """ import os import numpy as np import esutil from ..catalog import Entry, Catalog from ..galaxy import GalaxyCatalog from ..utilities import read_members from ..configuration import Configuration class PrepMembers(object): """ Class to prepare members for input to the next calibration iteration. """ def __init__(self, conf): """ Instantiate a PrepMembers object. Parameters ---------- conf: `str` or `redmapper.Configuration` Config filename or configuration object """ if not isinstance(conf, Configuration): self.config = Configuration(conf) else: self.config = conf def run(self, mode): """ Run the member preparation. Output members are put into self.config.zmemfile. Parameters ---------- mode: `str` May be "z_init": use initial cluster seed redshift as member redshift or may be "cg": use the most likely central spec_z as member redshift Raises ------ RuntimeError: If mode is not "z_init" or "cg". """ cat = Catalog.from_fits_file(self.config.catfile) if mode == 'z_init': cat_z = cat.z_init elif mode == 'cg': cat_z = cat.cg_spec_z else: raise RuntimeError("Unsupported mode %s" % (mode)) mem = read_members(self.config.catfile) # Cut the clusters use, = np.where((cat.Lambda / cat.scaleval > self.config.calib_minlambda) & (cat.scaleval > 0.0) & (np.abs(cat_z - cat.z_lambda) < self.config.calib_zlambda_clean_nsig * cat.z_lambda_e)) cat = cat[use] cat_z = cat_z[use] # Cut the members use, = np.where((mem.p * mem.theta_i * mem.theta_r > self.config.calib_pcut) | (mem.pcol > self.config.calib_pcut)) mem = mem[use] # Match cut clusters to members a, b = esutil.numpy_util.match(cat.mem_match_id, mem.mem_match_id) newmem = Catalog(np.zeros(b.size, dtype=[('z', 'f4'), ('z_lambda', 'f4'), ('p', 'f4'), ('pcol', 'f4'), ('central', 'i2'), ('ra', 'f8'), ('dec', 'f8'), ('mag', 'f4', self.config.nmag), ('mag_err', 'f4', self.config.nmag), ('refmag', 'f4'), ('refmag_err', 'f4'), ('ebv', 'f4')])) newmem.ra[:] = mem.ra[b] newmem.dec[:] = mem.dec[b] newmem.p[:] = mem.p[b] newmem.pcol[:] = mem.pcol[b] newmem.mag[:, :] = mem.mag[b, :] newmem.mag_err[:, :] = mem.mag_err[b, :] newmem.refmag[:] = mem.refmag[b] newmem.refmag_err[:] = mem.refmag_err[b] newmem.ebv[:] = mem.ebv[b] cent, = np.where(mem.r[b] < 0.0001) newmem.central[cent] = 1 newmem.z[:] = cat_z[a] newmem.z_lambda = cat.z_lambda[a] if self.config.calib_smooth > 0.0: newmem.z[:] += self.config.calib_smooth * np.random.normal(size=newmem.size) newmem.to_fits_file(self.config.zmemfile)
erykoff/redmapper
redmapper/calibration/prepmembers.py
Python
apache-2.0
3,678
[ "Galaxy" ]
40b087b5c0054681fecb46e9af4b1033a8fd4ba8e61940cd7020b6dbf6b39bbb
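PrepMembers.run() above pairs each surviving member row with its parent cluster via esutil.numpy_util.match(cat.mem_match_id, mem.mem_match_id), copies the cluster redshift onto the member, and optionally smears it with a Gaussian of width calib_smooth. The sketch below reproduces that pattern with plain NumPy; match_ids, the toy ID arrays and the calib_smooth value are illustrative stand-ins, not redmapper code.

import numpy as np

def match_ids(ids_a, ids_b):
    # NumPy stand-in for esutil.numpy_util.match: return index arrays (a, b)
    # with ids_a[a] == ids_b[b]; values of ids_a may repeat in ids_b.
    order = np.argsort(ids_a)
    pos = np.searchsorted(ids_a[order], ids_b)
    good = pos < ids_a.size
    pos = np.clip(pos, 0, ids_a.size - 1)
    good &= ids_a[order[pos]] == ids_b
    return order[pos[good]], np.where(good)[0]

cluster_ids = np.array([10, 42, 99])
cat_z = np.array([0.20, 0.35, 0.50])            # one redshift per cluster
member_ids = np.array([42, 42, 7, 10, 99, 99])  # id 7 has no parent cluster

a, b = match_ids(cluster_ids, member_ids)
calib_smooth = 0.02                              # illustrative smoothing width
member_z = cat_z[a] + calib_smooth * np.random.normal(size=b.size)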
import os import numpy as np import matplotlib.pyplot as plt import test_sersic_highn_basic import test_gaussian_basic # Now try a trick... import the sersic plotting script (this will run it, but it's quick) and # use the Sersic n=.5 results from there directly import plot_sersic_highn print "Plotting the Gaussian results" nobs = test_sersic_highn_basic.NOBS outfile = os.path.join("outputs", "gaussian_basic_output_N"+str(nobs)+".asc") data = np.loadtxt(outfile) g1obs_draw = data[:, 0] g2obs_draw = data[:, 1] sigma_draw = data[:, 2] delta_g1obs = data[:, 3] delta_g2obs = data[:, 4] delta_sigma = data[:, 5] err_g1obs = data[:, 6] err_g2obs = data[:, 7] err_sigma = data[:, 8] # First do the plot of g1 YMAX_ZOOMIN = 2.5e-4 XMIN = -.6 XMAX = .8 plt.clf() plt.axhline(ls='--', color='k') plt.axvline(ls='--', color='k') plt.xlim(XMIN, XMAX) plt.errorbar(g1obs_draw, delta_g1obs, yerr=err_g1obs, fmt='x', label="Gaussian") plt.xlabel(r'g$_1$ (DFT)') plt.ylabel(r'$\Delta$g$_1$ (DFT - Photon)') plt.ylim(-YMAX_ZOOMIN, YMAX_ZOOMIN) plt.title("Gaussian comparison") plt.errorbar( plot_sersic_highn.g1obs_draw[:, 0], plot_sersic_highn.delta_g1obs[:, 0], # First column is n=.5 yerr=plot_sersic_highn.err_g1obs[:, 0], fmt='x', label="n = "+str(test_sersic_highn_basic.SERSIC_N_TEST[0])+" ("+str( plot_sersic_highn.test_case)+")") plt.legend() plt.subplots_adjust(left=0.15) plt.savefig(os.path.join('plots', 'gaussian_zoomin_g1.png')) # Then do the plot of g2 YMAX_ZOOMIN = 2.5e-4 XMIN = -.6 XMAX = .8 plt.clf() plt.axhline(ls='--', color='k') plt.axvline(ls='--', color='k') plt.xlim(XMIN, XMAX) plt.errorbar(g2obs_draw, delta_g2obs, yerr=err_g2obs, fmt='x', label="Gaussian") plt.xlabel(r'g$_2$ (DFT)') plt.ylabel(r'$\Delta$g$_2$ (DFT - Photon)') plt.ylim(-YMAX_ZOOMIN, YMAX_ZOOMIN) plt.title("Gaussian comparison") plt.errorbar( plot_sersic_highn.g2obs_draw[:, 0], plot_sersic_highn.delta_g2obs[:, 0], # First column is n=.5 yerr=plot_sersic_highn.err_g2obs[:, 0], fmt='x', label="n = "+str(test_sersic_highn_basic.SERSIC_N_TEST[0])+" ("+str( plot_sersic_highn.test_case)+")") plt.legend() plt.subplots_adjust(left=0.15) plt.savefig(os.path.join('plots', 'gaussian_zoomin_g2.png')) # Then plot comparisons of the Gaussian DFT versus n=0.5 DFT and photons shooting results YMAX_ZOOMIN = 2.5e-4 XMIN = -.8 XMAX = .8 plt.clf() plt.axhline(ls='--', color='k') plt.axvline(ls='--', color='k') plt.xlim(XMIN, XMAX) plt.plot(g1obs_draw, plot_sersic_highn.g1obs_draw[:, 0] - g1obs_draw, '+', label=r"Sersic n=0.5 via DFT g$_1$") plt.plot(g2obs_draw, plot_sersic_highn.g2obs_draw[:, 0] - g2obs_draw, 'x', label=r"Sersic n=0.5 via DFT g$_2$") plt.errorbar( g1obs_draw, plot_sersic_highn.g1obs_draw[:, 0] - plot_sersic_highn.delta_g1obs[:, 0] - g1obs_draw, yerr=err_g1obs, fmt='+', label=r"Sersic n=0.5 via Photon Shooting g$_1$") plt.errorbar( g2obs_draw, plot_sersic_highn.g2obs_draw[:, 0] - plot_sersic_highn.delta_g2obs[:, 0] - g2obs_draw, yerr=err_g2obs, fmt='x', label=r"Sersic n=0.5 via Photon Shooting g$_2$") plt.xlabel(r'g$_i$ (Gaussian via DFT)') plt.ylabel(r'g$_i$ (Sersic n=0.5) - g$_i$ (Gaussian)') plt.ylim(-YMAX_ZOOMIN, YMAX_ZOOMIN) plt.title("Gaussian vs Sersic n=0.5 comparison") plt.legend() plt.subplots_adjust(left=0.15) plt.savefig(os.path.join('plots', 'gaussian_vs_Sersic.png'))
mardom/GalSim
devel/external/test_sersic_highn/plot_gaussian.py
Python
gpl-3.0
3,387
[ "Gaussian" ]
2b38e2d5fff0d80087922aaed7b715217604a86b4095fb4a9ffcbe0003d4c74a
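The comment block at the top of the script above leans on the fact that importing plot_sersic_highn executes it once and leaves its module-level arrays available for reuse. The standalone sketch below demonstrates that import trick with a throwaway module written to a temporary directory; toy_results and its contents are invented for the example.

import os
import sys
import tempfile
import textwrap

# Write a tiny "analysis script" whose results live at module level.
src = textwrap.dedent('''
    import numpy as np
    g1obs_draw = np.linspace(-0.6, 0.8, 5)
    delta_g1obs = 1e-4 * np.ones(5)
''')
tmpdir = tempfile.mkdtemp()
with open(os.path.join(tmpdir, 'toy_results.py'), 'w') as f:
    f.write(src)
sys.path.insert(0, tmpdir)

# Importing runs the script body once; afterwards its arrays are plain
# attributes of the module object, ready to plot against other data.
import toy_results
print(toy_results.g1obs_draw, toy_results.delta_g1obs)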
#!/usr/bin/python # -*- coding: utf-8 -*- # # --- BEGIN_HEADER --- # # resedit - Resource editor back end # Copyright (C) 2003-2014 The MiG Project lead by Brian Vinter # # This file is part of MiG. # # MiG is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # MiG is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # -- END_HEADER --- # # Martin Rehr martin@rehr.dk August 2005 """Display resource editor""" import socket import shared.resconfkeywords as resconfkeywords import shared.returnvalues as returnvalues from shared.functional import validate_input_and_cert from shared.init import initialize_main_variables, find_entry from shared.refunctions import list_runtime_environments from shared.resource import init_conf, empty_resource_config from shared.vgrid import res_allowed_vgrids def signature(): """Signature of the main function""" defaults = {'hosturl': [''], 'hostidentifier':['']} return ['html_form', defaults] def field_size(value, default=30): """Find best input field size for value""" value_len = len("%s" % value) if value_len < 40: size = default elif value_len > 120: size = 120 else: size = value_len return size def available_choices(configuration, client_id, resource_id, field, spec): """Find the available choices for the selectable field. Tries to lookup all valid choices from configuration if field is specified to be a string variable. 
""" if 'boolean' == spec['Type']: choices = [True, False] elif spec['Type'] in ('string', 'multiplestrings'): try: choices = getattr(configuration, '%ss' % field.lower()) except AttributeError, exc: print exc choices = [] else: choices = [] if not spec['Required']: choices = [''] + choices default = spec['Value'] if default in choices: choices = [default] + [i for i in choices if not default == i] return choices def main(client_id, user_arguments_dict): """Main function used by front end""" (configuration, logger, output_objects, op_name) = \ initialize_main_variables(client_id, op_header=False) defaults = signature()[1] (validate_status, accepted) = validate_input_and_cert( user_arguments_dict, defaults, output_objects, client_id, configuration, allow_rejects=False, ) if not validate_status: return (accepted, returnvalues.CLIENT_ERROR) hosturl = accepted['hosturl'][-1] hostidentifier = accepted['hostidentifier'][-1] resource_id = '%s.%s' % (hosturl, hostidentifier) extra_selects = 3 # Find allowed VGrids and Runtimeenvironments and add them to # configuration object for automated choice handling allowed_vgrids = [''] + res_allowed_vgrids(configuration, resource_id) allowed_vgrids.sort() configuration.vgrids = allowed_vgrids (re_status, allowed_run_envs) = list_runtime_environments(configuration) allowed_run_envs.sort() area_cols = 80 area_rows = 5 status = returnvalues.OK logger.info('Starting Resource edit GUI.') title_entry = find_entry(output_objects, 'title') title_entry['text'] = 'Resource Editor' output_objects.append({'object_type': 'header', 'text': 'Resource Editor' }) output_objects.append({'object_type': 'sectionheader', 'text' : '%s Resource Editor' % configuration.short_title}) output_objects.append({'object_type': 'text', 'text' : ''' Please fill in or edit the fields below to fit your %s resource reservation. Most fields will work with their default values. So if you are still in doubt after reading the help description, you can likely just leave the field alone.''' % configuration.short_title }) if hosturl and hostidentifier: conf = init_conf(configuration, hosturl, hostidentifier) if not conf: status = returnvalues.CLIENT_ERROR output_objects.append({'object_type': 'error_text', 'text' : '''No such resource! 
(%s.%s)''' % (hosturl, hostidentifier)}) return (output_objects, status) else: conf = empty_resource_config(configuration) res_fields = resconfkeywords.get_resource_specs(configuration) exe_fields = resconfkeywords.get_exenode_specs(configuration) store_fields = resconfkeywords.get_storenode_specs(configuration) output_objects.append({'object_type': 'html_form', 'text': """ <form method='post' action='reseditaction.py'> """ }) # Resource overall fields output_objects.append({'object_type': 'sectionheader', 'text' : "Main Resource Settings"}) output_objects.append({'object_type': 'text', 'text' : """This section configures general options for the resource.""" }) (title, field) = ('Host FQDN', 'HOSTURL') if hosturl: try: hostip = conf.get('HOSTIP', socket.gethostbyname(hosturl)) except: hostip = '<unknown>' output_objects.append({'object_type': 'html_form', 'text' : """<br /> <b>%s:</b>&nbsp;<a class='infolink' href='resedithelp.py#res-%s'>help</a><br /> <input type='hidden' name='%s' value='%s' /> <input type='hidden' name='hostip' value='%s' /> %s <br /> <br />""" % (title, field, field, conf[field], hostip, conf[field]) }) else: output_objects.append({'object_type': 'html_form', 'text' : """<br /> <b>%s:</b>&nbsp;<a class='infolink' href='resedithelp.py#res-%s'>help</a><br /> <input type='text' name='%s' size='%d' value='%s' /> <br /> <br />""" % (title, field, field, field_size(conf[field]), conf[field]) }) (title, field) = ('Host identifier', 'HOSTIDENTIFIER') if hostidentifier: output_objects.append({'object_type': 'html_form', 'text' : """<br /> <b>%s:</b>&nbsp;<a class='infolink' href='resedithelp.py#res-%s'>help</a><br /> <input type='hidden' name='%s' value='%s' /> %s <br /> <br />""" % (title, field, field, conf[field], conf[field]) }) (field, title) = 'frontendhome', 'Frontend Home Path' output_objects.append({'object_type': 'html_form', 'text' : """<br /> <b>%s:</b>&nbsp;<a class='infolink' href='resedithelp.py#%s'>help</a><br /> <input type='text' name='%s' size='%d' value='%s' /> <br /> <br />""" % (title, field, field, field_size(conf[field]), conf[field]) }) for (field, spec) in res_fields: title = spec['Title'] field_type = spec['Type'] if 'invisible' == spec['Editor']: continue elif 'input' == spec['Editor']: output_objects.append({'object_type': 'html_form', 'text' : """<br /> <b>%s:</b>&nbsp;<a class='infolink' href='resedithelp.py#res-%s'>help</a><br /> <input type='text' name='%s' size='%d' value='%s' /> <br /> <br />""" % (title, field, field, field_size(conf[field]), conf[field]) }) elif 'select' == spec['Editor']: choices = available_choices(configuration, client_id, resource_id, field, spec) res_value = conf[field] value_select = '' if field_type.startswith('multiple'): select_count = len(res_value) + extra_selects else: select_count = 1 res_value = [res_value] for i in range(select_count): value_select += "<select name='%s'>\n" % field for name in choices: selected = '' if i < len(res_value) and res_value[i] == name: selected = 'selected' display = "%s" % name if display == '': display = ' ' value_select += """<option %s value='%s'>%s</option>\n""" \ % (selected, name, display) value_select += """</select><br />\n""" output_objects.append({'object_type': 'html_form', 'text' : """<br /> <b>%s:</b>&nbsp;<a class='infolink' href='resedithelp.py#res-%s'>help</a><br /> %s <br />""" % (title, field, value_select) }) # Not all resource fields here map directly to keywords/specs input field (title, field) = ('Runtime Environments', 'RUNTIMEENVIRONMENT') re_list = 
conf[field] show = re_list + [('', []) for i in range(extra_selects)] re_select = "<input type='hidden' name='runtime_env_fields' value='%s'/>\n" \ % len(show) i = 0 for active in show: re_select += "<select name='runtimeenvironment%d'>\n" % i for name in allowed_run_envs + ['']: selected = '' if active[0] == name: selected = 'selected' display = "%s" % name if display == '': display = ' ' re_select += """<option %s value='%s'>%s</option>\n""" % \ (selected, name, display) re_select += """</select><br />\n""" values = '\n'.join(['%s=%s' % pair for pair in active[1]]) re_select += "<textarea cols='%d' rows='%d' name='re_values%d'>%s</textarea><br />\n" % \ (area_cols, area_rows, i, values) i += 1 output_objects.append({'object_type': 'html_form', 'text' : """<br /> <b>%s:</b>&nbsp;<a class='infolink' href='resedithelp.py#res-%s'>help</a><br /> Please enter any required environment variable settings on the form NAME=VALUE in the box below each selected runtimeenvironment.<br /> %s <br />""" % (title, field, re_select) }) # Execution node fields output_objects.append({'object_type': 'sectionheader', 'text' : "Execution nodes"}) output_objects.append({'object_type': 'text', 'text' : """This section configures execution nodes on the resource.""" }) (field, title) = 'executionnodes', 'Execution Node(s)' output_objects.append({'object_type': 'html_form', 'text' : """<br /> <b>%s:</b>&nbsp;<a class='infolink' href='resedithelp.py#exe-%s'>help</a><br /> <input type='text' name='exe-%s' size='%d' value='%s' /> <br /> <br />""" % (title, field, field, field_size(conf['all_exes'][field]), conf['all_exes'][field]) }) (field, title) = 'executionhome', 'Execution Home Path' output_objects.append({'object_type': 'html_form', 'text' : """<br /> <b>%s:</b>&nbsp;<a class='infolink' href='resedithelp.py#exe-%s'>help</a><br /> <input type='text' name='exe-%s' size='%d' value='%s' /> <br /> <br />""" % (title, field, field, field_size(conf['all_exes'][field]), conf['all_exes'][field]) }) for (field, spec) in exe_fields: title = spec['Title'] field_type = spec['Type'] if 'invisible' == spec['Editor']: continue elif 'input' == spec['Editor']: output_objects.append({'object_type': 'html_form', 'text' : """<br /> <b>%s:</b>&nbsp;<a class='infolink' href='resedithelp.py#exe-%s'>help</a><br /> <input type='text' name='exe-%s' size='%d' value='%s' /> <br /> <br />""" % (title, field, field, field_size(conf['all_exes'][field]), conf['all_exes'][field]) }) elif 'select' == spec['Editor']: choices = available_choices(configuration, client_id, resource_id, field, spec) exe_value = conf['all_exes'][field] value_select = '' if field_type.startswith('multiple'): select_count = len(exe_value) + extra_selects else: select_count = 1 exe_value = [exe_value] for i in range(select_count): value_select += "<select name='exe-%s'>\n" % field for name in choices: selected = '' if i < len(exe_value) and exe_value[i] == name: selected = 'selected' display = "%s" % name if display == '': display = ' ' value_select += """<option %s value='%s'>%s</option>\n""" \ % (selected, name, display) value_select += """</select><br />\n""" output_objects.append({'object_type': 'html_form', 'text' : """<br /> <b>%s:</b>&nbsp;<a class='infolink' href='resedithelp.py#exe-%s'>help</a><br /> %s <br />""" % (title, field, value_select) }) # Storage node fields output_objects.append({'object_type': 'sectionheader', 'text' : "Storage nodes"}) output_objects.append({'object_type': 'text', 'text' : """This section configures storage nodes on the resource.""" 
}) (field, title) = 'storagenodes', 'Storage Node(s)' output_objects.append({'object_type': 'html_form', 'text' : """<br /> <b>%s:</b>&nbsp;<a class='infolink' href='resedithelp.py#store-%s'>help</a><br /> <input type='text' name='store-%s' size='%d' value='%s' /> <br /> <br />""" % (title, field, field, field_size(conf['all_stores'][field]), conf['all_stores'][field]) }) (field, title) = 'storagehome', 'Storage Home Path' output_objects.append({'object_type': 'html_form', 'text' : """<br /> <b>%s:</b>&nbsp;<a class='infolink' href='resedithelp.py#store-%s'>help</a><br /> <input type='text' name='store-%s' size='%d' value='%s' /> <br /> <br />""" % (title, field, field, field_size(conf['all_stores'][field]), conf['all_stores'][field]) }) for (field, spec) in store_fields: title = spec['Title'] field_type = spec['Type'] if 'invisible' == spec['Editor']: continue elif 'input' == spec['Editor']: output_objects.append({'object_type': 'html_form', 'text' : """<br /> <b>%s:</b>&nbsp;<a class='infolink' href='resedithelp.py#store-%s'>help</a><br /> <input type='text' name='store-%s' size='%d' value='%s' /> <br /> <br />""" % (title, field, field, field_size(conf['all_stores'][field]), conf['all_stores'][field]) }) elif 'select' == spec['Editor']: choices = available_choices(configuration, client_id, resource_id, field, spec) store_value = conf['all_stores'][field] value_select = '' if field_type.startswith('multiple'): select_count = len(store_value) + extra_selects else: select_count = 1 store_value = [store_value] for i in range(select_count): value_select += "<select name='store-%s'>\n" % field for name in choices: selected = '' if i < len(store_value) and store_value[i] == name: selected = 'selected' display = "%s" % name if display == '': display = ' ' value_select += """<option %s value='%s'>%s</option>\n""" \ % (selected, name, display) value_select += """</select><br />\n""" output_objects.append({'object_type': 'html_form', 'text' : """<br /> <b>%s:</b>&nbsp;<a class='infolink' href='resedithelp.py#store-%s'>help</a><br /> %s <br />""" % (title, field, value_select) }) output_objects.append({'object_type': 'html_form', 'text': """ <input type='submit' value='Save' /> </form> """ }) return (output_objects, status)
heromod/migrid
mig/shared/functionality/resedit.py
Python
gpl-2.0
17,421
[ "Brian" ]
0e99e76650c00c90bae73d7c5fc5c79e304a5fbdb43cf582ec076195d3b3792b
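available_choices() in the module above determines what each generated <select> offers: fields that are not required get a leading blank option, and the value currently stored in the configuration is moved to the front so it renders preselected. A standalone illustration of that ordering rule (order_choices and the sample values are made up for the example):

def order_choices(choices, default, required):
    # Same ordering as available_choices(): optional fields may be left
    # blank, and the stored default is listed first.
    if not required:
        choices = [''] + choices
    if default in choices:
        choices = [default] + [c for c in choices if c != default]
    return choices

print(order_choices(['small', 'medium', 'large'], 'medium', required=True))
# -> ['medium', 'small', 'large']
print(order_choices(['small', 'medium', 'large'], '', required=False))
# -> ['', 'small', 'medium', 'large']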
######################################################################## # $HeadURL$ # Author : Andrei Tsaregorodtsev ######################################################################## """ Utilities for managing DIRAC configuration: getCEsFromCS getUnusedGridCEs getUnusedGridSEs getSiteUpdates getSEUpdates """ __RCSID__ = "$Id$" import re import types from DIRAC import gConfig, gLogger, S_OK from DIRAC.Core.Utilities import List from DIRAC.Core.Utilities.Grid import getBdiiCEInfo, getBdiiSEInfo, ldapService from DIRAC.Core.Utilities.SitesDIRACGOCDBmapping import getDIRACSiteName, getDIRACSesForSRM from DIRAC.ConfigurationSystem.Client.Helpers.Path import cfgPath from DIRAC.Core.Utilities.Pfn import pfnparse from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getVOs, getVOOption def getGridVOs(): """ Get all the VOMS VO names served by this DIRAC service """ voNames = [] result = getVOs() if not result['OK']: return result else: vos = result['Value'] for vo in vos: vomsVO = getVOOption( vo, "VOMSName" ) if vomsVO: voNames.append( vomsVO ) return S_OK( voNames ) def getCEsFromCS(): """ Get all the CEs defined in the CS """ knownCEs = [] result = gConfig.getSections( '/Resources/Sites' ) if not result['OK']: return result grids = result['Value'] for grid in grids: result = gConfig.getSections( '/Resources/Sites/%s' % grid ) if not result['OK']: return result sites = result['Value'] for site in sites: opt = gConfig.getOptionsDict( '/Resources/Sites/%s/%s' % ( grid, site ) )['Value'] ces = List.fromChar( opt.get( 'CE', '' ) ) knownCEs += ces return S_OK( knownCEs ) def getSEsFromCS( protocol = 'srm' ): """ Get all the SEs defined in the CS """ knownSEs = {} result = gConfig.getSections( '/Resources/StorageElements' ) if not result['OK']: return result ses = result['Value'] for se in ses: seSection = '/Resources/StorageElements/%s' % se result = gConfig.getSections( seSection ) if not result['OK']: continue accesses = result['Value'] for access in accesses: seProtocol = gConfig.getValue( cfgPath( seSection, access, 'Protocol' ), '' ) if seProtocol.lower() == protocol.lower() or protocol == 'any': host = gConfig.getValue( cfgPath( seSection, access, 'Host' ), '' ) knownSEs.setdefault( host, [] ) knownSEs[host].append( se ) else: continue return S_OK( knownSEs ) def getGridCEs( vo, bdiiInfo = None, ceBlackList = None ): """ Get all the CEs available for a given VO and having queues in Production state """ knownCEs = set() if ceBlackList is not None: knownCEs = knownCEs.union( set( ceBlackList ) ) ceBdiiDict = bdiiInfo if bdiiInfo is None: result = getBdiiCEInfo( vo ) if not result['OK']: return result ceBdiiDict = result['Value'] siteDict = {} for site in ceBdiiDict: siteCEs = set( ceBdiiDict[site]['CEs'].keys() ) newCEs = siteCEs - knownCEs if not newCEs: continue ceFullDict = {} for ce in newCEs: ceDict = {} ceInfo = ceBdiiDict[site]['CEs'][ce] ceType = 'Unknown' ceDict['Queues'] = [] for queue in ceInfo['Queues']: queueStatus = ceInfo['Queues'][queue].get( 'GlueCEStateStatus', 'UnknownStatus' ) if 'production' in queueStatus.lower(): ceType = ceInfo['Queues'][queue].get( 'GlueCEImplementationName', '' ) ceDict['Queues'].append( queue ) if not ceDict['Queues']: continue ceDict['CEType'] = ceType ceDict['GOCSite'] = site ceDict['CEID'] = ce systemName = ceInfo.get( 'GlueHostOperatingSystemName', 'Unknown' ) systemVersion = ceInfo.get( 'GlueHostOperatingSystemVersion', 'Unknown' ) systemRelease = ceInfo.get( 'GlueHostOperatingSystemRelease', 'Unknown' ) ceDict['System'] = ( 
systemName, systemVersion, systemRelease ) ceFullDict[ce] = ceDict siteDict[site] = ceFullDict result = S_OK( siteDict ) result['BdiiInfo'] = ceBdiiDict return result def getSiteUpdates( vo, bdiiInfo = None, log = None ): """ Get all the necessary updates for the already defined sites and CEs """ def addToChangeSet( entry, changeSet ): _section, _option, value, new_value = entry if new_value and new_value != value: changeSet.add( entry ) if log is None: log = gLogger ceBdiiDict = bdiiInfo if bdiiInfo is None: result = getBdiiCEInfo( vo ) if not result['OK']: return result ceBdiiDict = result['Value'] changeSet = set() gConfig.forceRefresh() for site in ceBdiiDict: result = getDIRACSiteName( site ) if not result['OK']: continue siteNames = result['Value'] for siteName in siteNames: siteSection = cfgPath( '/Resources', 'Sites', siteName.split('.')[0], siteName ) result = gConfig.getOptionsDict( siteSection ) if not result['OK']: continue siteDict = result['Value'] # Current CS values coor = siteDict.get( 'Coordinates', 'Unknown' ) mail = siteDict.get( 'Mail', 'Unknown' ).replace( ' ','' ) description = siteDict.get( 'Description', 'Unknown' ) longitude = ceBdiiDict[site].get( 'GlueSiteLongitude', '' ).strip() latitude = ceBdiiDict[site].get( 'GlueSiteLatitude', '' ).strip() # Current BDII value newcoor = '' if longitude and latitude: newcoor = "%s:%s" % ( longitude, latitude ) newmail = ceBdiiDict[site].get( 'GlueSiteSysAdminContact', '' ).replace( 'mailto:', '' ).strip() newdescription = ceBdiiDict[site].get( 'GlueSiteDescription', '' ).strip() # Adding site data to the changes list addToChangeSet( ( siteSection, 'Coordinates', coor, newcoor ), changeSet ) addToChangeSet( ( siteSection, 'Mail', mail, newmail ), changeSet ) addToChangeSet( ( siteSection, 'Description', description, newdescription ), changeSet ) ces = gConfig.getValue( cfgPath( siteSection, 'CE' ), [] ) for ce in ces: ceSection = cfgPath( siteSection, 'CEs', ce ) ceDict = {} result = gConfig.getOptionsDict( ceSection ) if result['OK']: ceDict = result['Value'] else: if ceBdiiDict[site]['CEs'].get( ce, None ): log.notice( "Adding new CE %s to site %s/%s" % (ce, siteName, site) ) ceInfo = ceBdiiDict[site]['CEs'].get( ce, None ) if ceInfo is None: ceType = ceDict.get( 'CEType', '') continue # Current CS CE info arch = ceDict.get( 'architecture', 'Unknown' ) OS = ceDict.get( 'OS', 'Unknown' ) si00 = ceDict.get( 'SI00', 'Unknown' ) ceType = ceDict.get( 'CEType', 'Unknown' ) ram = ceDict.get( 'HostRAM', 'Unknown' ) submissionMode = ceDict.get( 'SubmissionMode', 'Unknown' ) # Current BDII CE info newarch = ceBdiiDict[site]['CEs'][ce].get( 'GlueHostArchitecturePlatformType', '' ).strip() systemName = ceInfo.get( 'GlueHostOperatingSystemName', '' ).strip() systemVersion = ceInfo.get( 'GlueHostOperatingSystemVersion', '' ).strip() systemRelease = ceInfo.get( 'GlueHostOperatingSystemRelease', '' ).strip() newOS = '' if systemName and systemVersion and systemRelease: newOS = '_'.join( ( systemName, systemVersion, systemRelease ) ) newsi00 = ceInfo.get( 'GlueHostBenchmarkSI00', '' ).strip() newCEType = 'Unknown' for queue in ceInfo['Queues']: queueDict = ceInfo['Queues'][queue] newCEType = queueDict.get( 'GlueCEImplementationName', '' ).strip() if newCEType: break if newCEType=='ARC-CE': newCEType = 'ARC' if newCEType in ['ARC','CREAM']: newSubmissionMode = "Direct" newRAM = ceInfo.get( 'GlueHostMainMemoryRAMSize', '' ).strip() # Adding CE data to the change list addToChangeSet( ( ceSection, 'architecture', arch, newarch ), changeSet ) 
addToChangeSet( ( ceSection, 'OS', OS, newOS ), changeSet ) addToChangeSet( ( ceSection, 'SI00', si00, newsi00 ), changeSet ) addToChangeSet( ( ceSection, 'CEType', ceType, newCEType ), changeSet ) addToChangeSet( ( ceSection, 'HostRAM', ram, newRAM ), changeSet ) if submissionMode == "Unknown": addToChangeSet( ( ceSection, 'SubmissionMode', submissionMode, newSubmissionMode ), changeSet ) queues = ceInfo['Queues'].keys() for queue in queues: queueSection = cfgPath( ceSection, 'Queues', queue ) queueDict = {} result = gConfig.getOptionsDict( queueSection ) if result['OK']: queueDict = result['Value'] else: log.notice( "Adding new queue %s to CE %s" % (queue, ce) ) queueInfo = ceInfo['Queues'][queue] queueStatus = queueInfo['GlueCEStateStatus'] if queueStatus.lower() != "production": continue # Current CS queue info maxCPUTime = queueDict.get( 'maxCPUTime', 'Unknown' ) si00 = queueDict.get( 'SI00', 'Unknown' ) maxTotalJobs = queueDict.get( 'MaxTotalJobs', 'Unknown' ) # Current BDII queue info newMaxCPUTime = queueInfo.get( 'GlueCEPolicyMaxCPUTime', '' ) newSI00 = '' caps = queueInfo['GlueCECapability'] if type( caps ) == type( '' ): caps = [caps] for cap in caps: if 'CPUScalingReferenceSI00' in cap: newSI00 = cap.split( '=' )[-1] # Adding queue info to the CS addToChangeSet( ( queueSection, 'maxCPUTime', maxCPUTime, newMaxCPUTime ), changeSet ) addToChangeSet( ( queueSection, 'SI00', si00, newSI00 ), changeSet ) if maxTotalJobs == "Unknown": newTotalJobs = min( 1000, int( int( queueInfo.get( 'GlueCEInfoTotalCPUs', 0 ) )/2 ) ) newWaitingJobs = max( 2, int( newTotalJobs * 0.1 ) ) newTotalJobs = str( newTotalJobs ) newWaitingJobs = str( newWaitingJobs ) addToChangeSet( ( queueSection, 'MaxTotalJobs', '', newTotalJobs ), changeSet ) addToChangeSet( ( queueSection, 'MaxWaitingJobs', '', newWaitingJobs ), changeSet ) # Updating eligible VO list VOs = set() if queueDict.get( 'VO', '' ): VOs = set( [ q.strip() for q in queueDict.get( 'VO', '' ).split( ',' ) if q ] ) if not vo in VOs: VOs.add( vo ) VOs = list( VOs ) newVOs = ','.join( VOs ) addToChangeSet( ( queueSection, 'VO', '', newVOs ), changeSet ) return S_OK( changeSet ) def getGridSEs( vo, bdiiInfo = None, seBlackList = None ): """ Get all the SEs available for a given VO """ seBdiiDict = bdiiInfo if bdiiInfo is None: result = getBdiiSEInfo( vo ) if not result['OK']: return result seBdiiDict = result['Value'] knownSEs = set() if seBlackList is not None: knownSEs = knownSEs.union( set( seBlackList ) ) siteDict = {} for site in seBdiiDict: for gridSE in seBdiiDict[site]['SEs']: seDict = seBdiiDict[site]['SEs'][gridSE] #if "lhcb" in seDict['GlueSAName']: # print '+'*80 # print gridSE # for k,v in seDict.items(): # print k,'\t',v if not gridSE in knownSEs: siteDict.setdefault( site, {} ) if type( seDict['GlueSAAccessControlBaseRule'] ) == types.ListType: voList = [ re.sub( '^VO:', '', s ) for s in seDict['GlueSAAccessControlBaseRule'] ] else: voList = [ re.sub( '^VO:', '', seDict['GlueSAAccessControlBaseRule'] ) ] siteDict[site][gridSE] = { 'GridSite': seDict['GlueSiteUniqueID'], 'BackendType': seDict['GlueSEImplementationName'], 'Description': seDict.get( 'GlueSEName', '-' ), 'VOs': voList } result = S_OK( siteDict ) result['BdiiInfo'] = seBdiiDict return result def getGridSRMs( vo, bdiiInfo = None, srmBlackList = None, unUsed = False ): result = ldapService( serviceType = 'SRM', vo = vo ) if not result['OK']: return result srmBdiiDict = result['Value'] knownSRMs = set() if srmBlackList is not None: knownSRMs = knownSRMs.union( set( 
srmBlackList ) ) siteSRMDict = {} for srm in srmBdiiDict: endPoint = srm.get( 'GlueServiceEndpoint', '') srmHost = '' if endPoint: result = pfnparse( endPoint ) if not result['OK']: continue srmHost = result['Value']['Host'] if not srmHost: continue if srmHost in knownSRMs: continue if unUsed: result = getDIRACSesForSRM( srmHost ) if not result['OK']: return result diracSEs = result['Value'] if diracSEs: # If it is a known SRM and only new SRMs are requested, continue continue site = srm.get( 'GlueForeignKey', '' ).replace( 'GlueSiteUniqueID=', '' ) siteSRMDict.setdefault( site, {} ) siteSRMDict[site][srmHost] = srm if bdiiInfo is None: result = getBdiiSEInfo( vo ) if not result['OK']: return result seBdiiDict = result['Value'] else: seBdiiDict = bdiiInfo srmSeDict = {} for site in siteSRMDict: srms = siteSRMDict[site].keys() for srm in srms: if seBdiiDict.get( site, {} ).get( 'SEs', {} ).get( srm, {} ): srmSeDict.setdefault( site, {} ) srmSeDict[site].setdefault( srm, {} ) srmSeDict[site][srm]['SRM'] = siteSRMDict[site][srm] srmSeDict[site][srm]['SE'] = seBdiiDict[site]['SEs'][srm] return S_OK( srmSeDict ) def getSRMUpdates( vo, bdiiInfo = None ): changeSet = set() def addToChangeSet( entry, changeSet ): _section, _option, value, new_value = entry if new_value and new_value != value: changeSet.add( entry ) result = getGridSRMs( vo, bdiiInfo = bdiiInfo ) if not result['OK']: return result srmBdiiDict = result['Value'] result = getSEsFromCS() if not result['OK']: return result seDict = result['Value'] result = getVOs() if result['OK']: csVOs = set( result['Value'] ) else: csVOs = set( [vo] ) for seHost, diracSE in seDict.items(): seSection = '/Resources/StorageElements/%s' % diracSE[0] # Look up existing values first description = gConfig.getValue( cfgPath( seSection, 'Description'), 'Unknown' ) backend = gConfig.getValue( cfgPath( seSection, 'BackendType'), 'Unknown' ) vos = gConfig.getValue( cfgPath( seSection, 'VO'), 'Unknown' ).replace( ' ','' ) size = gConfig.getValue( cfgPath( seSection, 'TotalSize'), 'Unknown' ) # Look up current BDII values srmDict = {} seBdiiDict = {} for site in srmBdiiDict: if seHost in srmBdiiDict[site]: srmDict = srmBdiiDict[site][seHost]['SRM'] seBdiiDict = srmBdiiDict[site][seHost]['SE'] break if not srmDict or not seBdiiDict: continue newDescription = seBdiiDict.get( 'GlueSEName', 'Unknown' ) newBackend = seBdiiDict.get( 'GlueSEImplementationName', 'Unknown' ) newSize = seBdiiDict.get( 'GlueSESizeTotal', 'Unknown' ) addToChangeSet( ( seSection, 'Description', description, newDescription ), changeSet ) addToChangeSet( ( seSection, 'BackendType', backend, newBackend ), changeSet ) addToChangeSet( ( seSection, 'TotalSize', size, newSize ), changeSet ) # Evaluate VOs if no space token defined, otherwise this is VO specific spaceToken = '' for i in range( 1, 10 ): protocol = gConfig.getValue( cfgPath( seSection, 'AccessProtocol.%d' % i, 'Protocol' ), '' ) if protocol.lower() == 'srm': spaceToken = gConfig.getValue( cfgPath( seSection, 'AccessProtocol.%d' % i, 'SpaceToken' ), '' ) break if not spaceToken: bdiiVOs = srmDict.get( 'GlueServiceAccessControlBaseRule', [] ) bdiiVOs = set( [ re.sub( '^VO:', '', rule ) for rule in bdiiVOs ] ) seVOs = csVOs.intersection( bdiiVOs ) newVOs = ','.join( seVOs ) addToChangeSet( ( seSection, 'VO', vos, newVOs ), changeSet ) return S_OK( changeSet )
rajanandakumar/DIRAC
ConfigurationSystem/Client/Utilities.py
Python
gpl-3.0
16,656
[ "DIRAC" ]
64dfbe62da2fe879664274bd36119a86e7e777912895a595cf8ba672d468d4be
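Both getSiteUpdates() and getSRMUpdates() above accumulate their proposed CS changes as (section, option, current_value, new_value) tuples, and their nested addToChangeSet helpers keep an entry only when the BDII value is non-empty and differs from what the CS already holds; queues with no MaxTotalJobs get a simple capacity heuristic. A standalone sketch of both ideas (the site path, option values and CPU count are invented):

def add_to_change_set(entry, change_set):
    # Same guard as the nested addToChangeSet helpers: record an update
    # only when the new value is non-empty and actually different.
    _section, _option, value, new_value = entry
    if new_value and new_value != value:
        change_set.add(entry)

change_set = set()
add_to_change_set(('/Resources/Sites/LCG/LCG.Example.org', 'Mail',
                   'old@example.org', 'new@example.org'), change_set)
add_to_change_set(('/Resources/Sites/LCG/LCG.Example.org', 'Description',
                   'Same text', 'Same text'), change_set)
assert len(change_set) == 1  # only the genuinely changed option is kept

# Heuristic applied when MaxTotalJobs is unknown: half the advertised CPUs,
# capped at 1000, with 10% (but at least 2) of that allowed to wait.
total_cpus = 512
new_total_jobs = min(1000, int(total_cpus / 2))
new_waiting_jobs = max(2, int(new_total_jobs * 0.1))
assert (new_total_jobs, new_waiting_jobs) == (256, 25)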
import datetime as dt import numpy as np def ncgettypecode(dtype): ''' purpose: netcdf-typecode from array-dtype ''' if ((dtype == np.dtype('float32')) or (dtype == 'float32')): return 'f' elif ((dtype == np.dtype('float64')) or (dtype == 'float64')): return 'd' elif ((dtype == np.dtype('int32')) or (dtype == 'int32')): return 'i' elif ((dtype == np.dtype('int64')) or (dtype == 'int64')): return 'l' def ncgetdatetime(ncin): ''' extract datetimes from the 'time' coordinate in ncin ncin: input netcdf file returns an array of datetimes ''' tunits = getattr(ncin.variables['time'],'units').split() if tunits[0] == 'days': mul = 24.*3600. elif tunits[0] == 'hours': mul = 3600. elif tunits[0] == 'minutes': mul = 60. elif tunits[0] == 'seconds': mul = 1. else: raise Exception("no time conversion found for '"+tunits[0],+"'") try: refdat = dt.datetime.strptime(tunits[2]+' '+tunits[3],"%Y-%m-%d %H:%M:%S") except: refdat = dt.datetime.strptime(tunits[2]+' '+tunits[3],"%Y-%m-%d %H:%M") return [(refdat+ dt.timedelta(seconds=e*mul)) for e in ncin.variables['time'][:]] #return(multifunc(lambda x: refdat + timedelta(seconds=x[0]), [ncin.variables['time'][:]*mul],[False],[[0]])[0])
hendrikwout/pynacolada
old/pynacolada/ncdfextract.py
Python
gpl-3.0
1,372
[ "NetCDF" ]
9a2843d42d83989bdee5b85b8b1c31f3fb149e5e54043053632ef10512563a93
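ncgetdatetime() above converts a CF-style time axis ("<unit> since <date> <time>") to datetime objects by scaling the stored values to seconds and offsetting the parsed reference date. The same conversion on a plain list, with no NetCDF file involved (the units string and values are made up):

import datetime as dt

def to_datetimes(values, units):
    # Mirror of the conversion in ncgetdatetime(), applied to a plain list.
    parts = units.split()
    mul = {'days': 24. * 3600., 'hours': 3600.,
           'minutes': 60., 'seconds': 1.}[parts[0]]
    ref = dt.datetime.strptime(parts[2] + ' ' + parts[3], "%Y-%m-%d %H:%M:%S")
    return [ref + dt.timedelta(seconds=v * mul) for v in values]

print(to_datetimes([0, 6, 12], "hours since 2000-01-01 00:00:00"))
# -> 2000-01-01 at 00:00, 06:00 and 12:00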
# -*- coding: utf-8 -*- { "'Cancel' will indicate an asset log entry did not occur": "'cancelar' irá indicar que a entrada de log de ativo não ocorreu", "A location that specifies the geographic area for this region. This can be a location from the location hierarchy, or a 'group location', or a location that has a boundary for the area.": 'Um local que especifica a área geográfica dessa região. Este pode ser um local a partir da hierarquia local, ou um "grupo local", ou um local que tem um limite para a área.', "Acronym of the organization's name, eg. IFRC.": 'Acrônimo do nome da organização, por exemplo, FICV.', "Authenticate system's Twitter account": 'Sistema de Autenticação para conta de Twitter', "Can't import tweepy": 'Não pode importar tweepy', "Caution: doesn't respect the framework rules!": 'Cuidado: não respeitar as regras de enquadramento!', "Format the list of attribute values & the RGB value to use for these as a JSON object, e.g.: {Red: '#FF0000', Green: '#00FF00', Yellow: '#FFFF00'}": "Formatar a lista de valores de atributos & o valor RGB a ser usado para esses como o objeto JSON, Exemplo: {Red: '#FF0000, Green: '#00FF00', Yellow: '#FFFF00'}", "If selected, then this Asset's Location will be updated whenever the Person's Location is updated.": 'Se selecionado, esta localização do ativo será atualizado sempre que a localização da pessoa é atualizada.', "If this configuration represents a region for the Regions menu, give it a name to use in the menu. The name for a personal map configuration will be set to the user's name.": 'Se esta configuração representa uma região para o menu regiões, dê-lhe um nome a ser utilizado no menu. O nome de uma configuração pessoal do mapa será configurado para o nome do usuário.', "If this field is populated then a user who specifies this Organization when signing up will be assigned as a Staff of this Organization unless their domain doesn't match the domain field.": 'Se esse campo for Preenchido, então, um usuário que especificar esta organização quando se registrar será designado como um agente desta organização a menos que seu domínio não corresponde ao campo de domínio.', "If this is ticked, then this will become the user's Base Location & hence where the user is shown on the Map": 'Se isso for ticado, se tornará a base geográfica do usuário e, consequentemente onde este aparece no mapa.', "If you don't see the Hospital in the list, you can add a new one by clicking link 'Create Hospital'.": "Se você não vê o Hospital na lista, você pode incluir um novo clicando no link 'Criar Hospital'.", "If you don't see the Office in the list, you can add a new one by clicking link 'Create Office'.": "Se você não vê o escritório na lista, você pode incluir um novo clicando no link 'Criar escritório'.", "If you don't see the Organization in the list, you can add a new one by clicking link 'Create Organization'.": 'Se voce não vê a Organização na lista, voce poderá adicionar uma nova clicando no link "Criar Organização"', "Instead of automatically syncing from other peers over the network, you can also sync from files, which is necessary where there's no network. You can use this page to import sync data from files and also export data to sync files. Click the link on the right to go to this page.": 'Em vez de sincronizar automaticamente com outros pares pela rede, voce também pode sincronizar com arquivos, o que é necessário quando não há rede. 
Você pode utilizar esta página para importar dados de sincronização de arquivos e também exportar dados para arquivos de Sincronização. Clique no link à direita para ir para esta página.', "Level is higher than parent's": 'Nível superior ao dos pais', "Need a 'url' argument!": "Precisa de um argumento ' url!", "Optional. The name of the geometry column. In PostGIS this defaults to 'the_geom'.": "opcional O nome da coluna de geometria. Em PostGIS padroniza para 'the_geom'.", "Parent level should be higher than this record's level. Parent level is": 'Nível dos pais deve ser maior que o nível do registro. Nível do Pai é', "Password fields don't match": 'Os campos de senha não são iguais.', "Phone number to donate to this organization's relief efforts.": 'Número de telefone para doar ao serviço de assistência social desta organização', "Please come back after sometime if that doesn't help.": 'Por favor, volte após algum tempo se isso não ajuda.', "Quantity in %s's Inventory": 'Quantidade de %s do Inventário', "Select a Room from the list or click 'Create Room'": "Escolha uma sala da lista ou clique 'Criar sala'", "Select a person in charge for status 'assigned'": "Selecione uma pessoa responsável para status 'DESIGNADO'", "Select this if all specific locations need a parent at the deepest level of the location hierarchy. For example, if 'district' is the smallest division in the hierarchy, then all specific locations would be required to have a district as a parent.": "Selecione isto se todas as localidades especificas precisarem de um pai no nível mais alto da hierarquia. Por exemplo, se 'distrito' é a menor divisão na hierarquia e, em seguida, todos os locais específicos seriam obrigados a ter um distrito como um pai.", "Select this if all specific locations need a parent location in the location hierarchy. This can assist in setting up a 'region' representing an affected area.": 'Selecione isto se todos os locais específicos de uma posição pai na hierarquia do local. Isso pode ajudar na configuração de uma "região" representando uma área afetada.', "Sorry, things didn't get done on time.": 'Desculpe ! As tarefas não foram concluídas em tempo útil.', "Sorry, we couldn't find that page.": 'Desculpe, não foi possível localizar essa página.', "System's Twitter account updated": 'DO SISTEMA Chilreiam conta ATUALIZADO', "The Donor(s) for this project. Multiple values can be selected by holding down the 'Control' key.": "O doador(s) para este projeto. Vários valores podem ser selecionados ao manter pressionado a chave 'control'", "The URL of the image file. If you don't upload an image file, then you must specify its location here.": 'A URL do arquivo de imagem. Se voce não fizer o upload de um arquivo de imagem, então voce deverá especificar sua localização aqui.', "To search by person name, enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": "Para pesquisar por nome, digite qualquer do primeiro, meio ou últimos nomes, separados por espaços. Pode utilizar o % como um substituto para qualquer caracter. PRESSIONE ' Search' sem entrada para listar todas as pessoas.", "To search for a body, enter the ID tag number of the body. You may use % as wildcard. Press 'Search' without input to list all bodies.": "Para procurar um corpo, digite o número da ID do corpo. Pode utilizar o % como um substituto para qualquer caracter. 
PRESSIONE ' Search' sem entrada para listar todos os organismos.", "To search for a hospital, enter any of the names or IDs of the hospital, or the organization name or acronym, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all hospitals.": "Para procurar um hospital, digite qualquer um dos nomes ou IDs do hospital, ou o nome da organização ou Acrônimo, separados por espaços. Pode utilizar o % como um substituto para qualquer caracter. PRESSIONE ' Search' sem entrada para listar todos os hospitais.", "To search for a hospital, enter any of the names or IDs of the hospital, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all hospitals.": "Para procurar um hospital, digite qualquer um dos nomes ou IDs do hospital, separados por espaços. Pode utilizar o % como um substituto para qualquer caracter. PRESSIONE ' Search' sem entrada para listar todos os hospitais.", "To search for a location, enter the name. You may use % as wildcard. Press 'Search' without input to list all locations.": "Para procurar um local, digite o nome. Pode utilizar o % como um substituto para qualquer caracter. PRESSIONE ' Search' sem entrada para listar todos os locais.", "To search for a person, enter any of the first, middle or last names and/or an ID number of a person, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": "Para procurar por uma pessoa, digite qualquer do primeiro, meio ou últimos nomes e/ou um número de ID de uma pessoa, separados por espaços. Pode utilizar o % como um substituto para qualquer caracter. PRESSIONE ' Search' sem entrada para listar todas as pessoas.", "To search for a person, enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": "Para procurar por uma pessoa, digite ou o primeiro nome, ou o nome do meio ou sobrenome, separados por espaços. Pode utilizar o % como um substituto para qualquer caracter. PRESSIONE ' Search' sem entrada para listar todas as pessoas.", "To search for an assessment, enter any portion the ticket number of the assessment. You may use % as wildcard. Press 'Search' without input to list all assessments.": "Para procurar por uma avaliação, digite qualquer parte o número da permissão da avaliação. Pode utilizar o % como um substituto para qualquer caracter. PRESSIONE ' Search' sem entrada para listar todas as avaliações.", "Type the first few characters of one of the Person's names.": 'Digite os primeiros caracteres de um dos nomes da pessoa.', "Upload an image file here. If you don't upload an image file, then you must specify its location in the URL field.": 'Fazer atualizacao de um arquivo de imagem aqui. Se voce não fizer o upload de um arquivo de imagem, então voce deverá especificar sua localização no campo URL', "View/Edit the Database directly (caution: doesn't respect the framework rules!)": 'Visualizar/Alterar a base de dados directamente ( cuidado : não cumpre com as regras da infraestrutura ! ) ).', "When syncing data with others, conflicts happen in cases when two (or more) parties want to sync information which both of them have modified, i.e. conflicting information. Sync module tries to resolve such conflicts automatically but in some cases it can't. 
In those cases, it is up to you to resolve those conflicts manually, click on the link on the right to go to this page.": 'Quando Sincronizando dados com outros, os conflitos acontecem em casos onde dois (ou mais) grupos desejam sincronizar informações que os dois tenham modificado, ou seja, informações conflitantes. Módulo de sincronização tenta resolver esses conflitos automaticamente mas em alguns casos isso não consegue. Nesses casos, cabe a si resolver esses conflitos manualmente, clique no link à direita para ir para esta página.', "You haven't made any calculations": 'Não fez quaisquer cálculos.', "couldn't be parsed so NetworkLinks not followed.": 'Não pôde ser analisado então o NetworkLinks não seguiu.', "includes a GroundOverlay or ScreenOverlay which aren't supported in OpenLayers yet, so it may not work properly.": 'Inclui um GroundOverlay ou ScreenOverlay que não são ainda suportados em OpenLayuers, portanto poderá não funcionar na totalidade.', '"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" é uma expressão opcional como "field1=\'newvalue\'". Não é possível atualizar ou excluir os resultados de uma junção', '# of International Staff': '# De equipe internacional', '# of National Staff': '# De equipe nacional', '# of Vehicles': '# De Veículos', '%(msg)s\nIf the request type is "%(type)s", please enter the %(type)s on the next screen.': '%(msg)s\nSe o tipo de pedido é "%(type)s", digite a %(type)s na próxima tela.', '%(system_name)s - Verify Email': '%(system_name)s - Verificar E-Mail', '%s Create a new site or ensure that you have permissions for an existing site.': '%s Cria um novo site ou garante que você tenha permissões para um site existente.', '%s rows deleted': '%s linhas excluídas', '%s rows updated': '%s linhas atualizadas', '& then click on the map below to adjust the Lat/Lon fields': 'Em seguida selecione o mapa abaixo para ajustar os campos Lat/Lon', '* Required Fields': '* campos obrigatórios', '0-15 minutes': '0-15 minutos', '1 Assessment': '1 Avaliação', '1 location, shorter time, can contain multiple Tasks': '1 Local, menos tempo, pode conter várias Tarefas', '1-3 days': '1 a 3 dias', '15-30 minutes': '15 a 30 minutos', '2 different options are provided here currently:': '2 opções diferentes são fornecidos aqui atualmente:', '2x4 Car': 'Carro 2x4', '30-60 minutes': '30-60 minutos', '4-7 days': '4-7 Dias', '4x4 Car': 'Carro 4x4', '8-14 days': '8-14 Dias', 'A Marker assigned to an individual Location is set if there is a need to override the Marker assigned to the Feature Class.': 'Um marcador assinalado para um local individual é configurado se há a necessidade de substituir um marcador assinalado para o Recurso Classe.', 'A Reference Document such as a file, URL or contact person to verify this data. You can type the 1st few characters of the document name to link to an existing document.': 'Um documento de referência como um arquivo, URL ou contacto pessoal para verificar esses dados. 
Pode inserir as primeiras letras do nome dum documento para chegar a esse documento.', 'A brief description of the group (optional)': 'Uma descrição breve do grupo (opcional)', 'A file downloaded from a GPS containing a series of geographic points in XML format.': 'Um ficheiro descarregado de um GPS contendo uma série de pontos geográficos em formato XML.', 'A file in GPX format taken from a GPS whose timestamps can be correlated with the timestamps on the photos to locate them on the map.': 'Um ficheiro em formato GPX retirado de um GPS cujas datas e horas podem ser correlacionadas com as de fotografias para localização num mapa.', 'A library of digital resources, such as photos, documents and reports': 'Uma biblioteca de recursos digitais, como fotos, documentos e relatórios', 'A location group can be used to define the extent of an affected area, if it does not fall within one administrative region.': 'Um grupo local pode ser usado para definir a extensão de uma área afetada, se não cair dentro de uma região administrativa.', 'A location group is a set of locations (often, a set of administrative regions representing a combined area). Member locations are added to a location group here. Location groups may be used to filter what is shown on the map and in search results to only entities covered by locations in the group. A location group can be used to define the extent of an affected area, if it does not fall within one administrative region. Location groups can be used in the Regions menu.': 'Um grupo de localização é um conjunto de locais (muitas vezes, um conjunto de regiões administrativas que representam uma área Combinada). Membros locais são adicionados em grupos locais aqui. Grupos locais podem ser utilizados para filtrar o que é mostrado no mapa e nos resultados da procura apenas as entidades locais abrangidas no grupo. Um grupo local pode ser usado para definir a extensão de uma área afetada, se não cair dentro de uma região administrativa. Grupos local pode ser utilizado no menu Regiões.', 'A location group is a set of locations (often, a set of administrative regions representing a combined area).': 'Um grupo de localização é um conjunto de locais (muitas vezes, um conjunto de regiões administrativas que representam uma área Combinada).', 'A location group must have at least one member.': 'Um grupo de localização deve ter, pelo menos, um membro.', 'A survey series with id %s does not exist. Please go back and create one.': 'Id% não foi encontrado na pesquisa. 
Por favor, volte e crie uma.', 'ABOUT THIS MODULE': 'SOBRE ESTE MÓDULO', 'ACCESS DATA': 'Dados de Acesso', 'ANY': 'Todos', 'API is documented here': 'API está documentada aqui', 'ATC-20 Rapid Evaluation modified for New Zealand': 'ATC-20 Rápida Avaliação modificado para a Nova Zelândia', 'Abbreviation': 'Abreviatura', 'Ability to Fill Out Surveys': 'Capacidade para preencher Inquéritos', 'Ability to customize the list of details tracked at a Shelter': 'Capacidade de Customizar a lista de detalhes rastreados em um Abrigo', 'Ability to customize the list of human resource tracked at a Shelter': 'Capacidade de Customizar a lista de recursos humanos Rastreados em um Abrigo', 'Ability to customize the list of important facilities needed at a Shelter': 'Capacidade de Customizar a lista das instalações importante necessária em um Abrigo', 'Ability to view Results of Completed and/or partially filled out Surveys': 'Capacidade para visualizar resultados de Pesquisas concluídas e/ou parcialmente preenchidas', 'About Sahana': 'Sobre Sahana', 'About': 'sobre', 'Access denied': 'Acesso negado', 'Access to Shelter': 'Acesso a Abrigo', 'Access to education services': 'Acesso a serviços de educação', 'Accessibility of Affected Location': 'Acessibilidade do Local Afectado', 'Account Registered - Please Check Your Email': 'Conta registrada - verifique seu e-mail', 'Account registered, however registration is still pending approval - please wait until confirmation received.': 'Conta registrada, mas o registro ainda aguarda aprovação - por favor aguarde até confirmação ser recebida.', 'Acronym': 'Iniciais', 'Actionable by all targeted recipients': 'Acionáveis por todos os destinatários de destino', 'Actionable only by designated exercise participants; exercise identifier SHOULD appear in <note>': 'Acionável apenas pelos participantes designados do exercício; a identificação do exercício deve aparecer em <note>', 'Actioned?': 'Acionado?', 'Actions taken as a result of this request.': 'Ações tomadas como resultado desse pedido.', 'Actions': 'Ações', 'Activate Events from Scenario templates for allocation of appropriate Resources (Human, Assets & Facilities).': 'Ativar eventos dos templates de cenário para alocação adequada de recursos (humanos, ativos e equipamentos)', 'Active Problems': 'Problemas ativos', 'Active': 'ativo', 'Activities matching Assessments:': 'Atividades correspondentes a Avaliações:', 'Activities of boys 13-17yrs before disaster': 'Atividades de garotos 13-17 anos antes do desastre', 'Activities of boys 13-17yrs now': 'Atividades de garotos 13-17yrs agora', 'Activities of boys <12yrs before disaster': 'Atividades de garotos <12 anos antes do desastre', 'Activities of boys <12yrs now': 'Atividades de garotos <12 anos agora', 'Activities of children': 'Atividades de crianças', 'Activities of girls 13-17yrs before disaster': 'Atividades de meninas 13-17yrs antes de desastres', 'Activities of girls 13-17yrs now': 'Atividades de meninas 13-17yrs agora', 'Activities of girls <12yrs before disaster': 'Atividades de meninas <12yrs antes de desastres', 'Activities of girls <12yrs now': 'Atividades de meninas <12 anos agora', 'Activities': 'Atividades', 'Activity Added': 'Atividade Incluída', 'Activity Deleted': 'Atividade Apagada', 'Activity Details': 'Detalhes da Atividade', 'Activity Report': 'Relatório de atividades', 'Activity Reports': 'Relatórios de Atividades', 'Activity Type': 'Tipo de atividade', 'Activity Updated': 'Atividade Atualizada', 'Activity': 'atividade', 'Add Activity Type': 'Incluir tipo de 
atividade', 'Add Address': 'Incluir Endereço', 'Add Alternative Item': 'Incluir item alternativo', 'Add Assessment Summary': 'Incluir Resumo de Avaliação', 'Add Assessment': 'Incluir Avaliação', 'Add Asset Log Entry - Change Label': 'Incluir entrada de log de ativo - trocar a Etiqueta', 'Add Availability': 'Incluir Disponibilidade', 'Add Baseline Type': 'Incluir Tipo de Linha de Base', 'Add Baseline': 'Incluir Linha de Base', 'Add Bundle': 'Incluir Pacote Configurável', 'Add Camp Service': 'Incluir acampamento de serviço', 'Add Camp Type': 'Incluir tipo de acampamento', 'Add Camp': 'Incluir acampamento', 'Add Certificate for Course': 'Incluir Certificado de Curso', 'Add Certification': 'Adicionar Certificação', 'Add Competency': 'incluir competência', 'Add Contact': 'Criar contato', 'Add Contact Information': 'Incluir informações de contato', 'Add Credential': 'Incluir referência', 'Add Credentials': 'Incluir Referências', 'Add Disaster Victims': 'Incluir Vítimas de Desastre', 'Add Distribution.': 'Incluir distribuição.', 'Add Donor': 'Incluir doador', 'Add Flood Report': 'Incluir Relatório Enchente', 'Add Group Member': 'Incluir Membro do Grupo', 'Add Human Resource': 'Incluir Recurso Humano', 'Add Identity': 'Incluir Identidade', 'Add Image': 'Incluir Imagem', 'Add Impact Type': 'Incluir Tipo De Impacto', 'Add Impact': 'Adicionar Impacto', 'Add Inventory Item': 'Incluir item de inventário', 'Add Item to Catalog': 'Incluir Item no Catálogo', 'Add Item to Commitment': 'Incluir Item no Compromisso', 'Add Item to Inventory': 'Incluir Item de Inventário', 'Add Item to Request': 'Incluir Item para pedido', 'Add Item to Shipment': 'Adicionar Item para Embarque', 'Add Item': 'Incluir item', 'Add Job Role': 'Incluir Cargo', 'Add Key': 'Incluir Chave', 'Add Kit': 'Adicionar Kit', 'Add Level 1 Assessment': 'Incluir nível de Avaliação 1', 'Add Level 2 Assessment': 'Incluir nível de Avaliação 2', 'Add Log Entry': 'Incluir Entrada de Log', 'Add Member': 'Incluir Membro', 'Add Membership': 'Incluir Associação', 'Add Message': 'Incluir Mensagem', 'Add Mission': 'Incluir Missão', 'Add Need Type': 'Adicionar o tipo Necessário', 'Add Need': 'Incluir o necessário', 'Add New Assessment Summary': 'Incluir novo Resumo de Avaliação', 'Add New Baseline Type': 'Incluir novo tipo de linha de base', 'Add New Baseline': 'Incluir nova linha de base', 'Add New Budget': 'Adicionar Novo Orçamento', 'Add New Bundle': 'Incluir Novo Pacote', 'Add New Camp Service': 'Incluir Novo Serviço de Acampamento', 'Add New Camp Type': 'Incluir Novo Tipo de Acampamento', 'Add New Camp': 'Incluir novo Acampamento', 'Add New Cluster Subsector': 'Adicionar novo subgrupo', 'Add New Cluster': 'Adicionar novo grupo', 'Add New Commitment Item': 'Incluir novo item de compromisso', 'Add New Document': 'Incluir Novo Documento', 'Add New Donor': 'Adicionar novo doador', 'Add New Entry': 'Incluir Nova Entrada', 'Add New Event': 'Adicionar novo evento', 'Add New Flood Report': 'Adicionar novo relatório de cheias', 'Add New Human Resource': 'Incluir novos recursos humanos', 'Add New Image': 'Adicionar nova imagem', 'Add New Impact Type': 'Incluir novo Tipo De Impacto', 'Add New Impact': 'Adicionar novo impacto', 'Add New Inventory Item': 'Incluir novo Item De Inventário', 'Add New Item to Kit': 'Incluir novo Item de Kit', 'Add New Key': 'Adicionar Nova Chave', 'Add New Level 1 Assessment': 'Incluir novo nível 1 avaliação', 'Add New Level 2 Assessment': 'Incluir novo nível 2 avaliação', 'Add New Member': 'Incluir Novo Membro', 'Add New Membership': 'Incluir novo 
membro', 'Add New Need Type': 'Incluir novo Tipo Necessário', 'Add New Need': 'Adicionar novas necessidades', 'Add New Note': 'Adicionar NOVA NOTA', 'Add New Population Statistic': 'Incluir nova População De Estatística', 'Add New Problem': 'Incluir novo Problema', 'Add New Rapid Assessment': 'Incluir nova Avaliação Rápida', 'Add New Received Item': 'Incluir novo Item Recebido', 'Add New Record': 'Incluir Novo Registro', 'Add New Request Item': 'Incluir novo Item de Pedido', 'Add New Request': 'Incluir novo pedido', 'Add New River': 'Incluir novo Rio', 'Add New Role to User': 'Incluir nova função para o usuário', 'Add New Scenario': 'Adicionar Novo cenário', 'Add New Sent Item': 'Incluir novo Item Enviado', 'Add New Setting': 'Adicionar Nova Configuração', 'Add New Solution': 'Adicionar nova solução', 'Add New Staff Type': 'Incluir novo tipo de equipe.', 'Add New Staff': 'Adicionar Nova Equipe', 'Add New Subsector': 'Incluir novo Subsector', 'Add New Survey Answer': 'Incluir nova resposta na pesquisa.', 'Add New Survey Question': 'Incluir nova pergunta na pesquisa.', 'Add New Survey Section': 'Incluir nova seção na pesquisa.', 'Add New Survey Series': 'Incluir nova série na pesquisa.', 'Add New Survey Template': 'Incluir novo Modelo De Pesquisa', 'Add New Team': 'Adicionar nova equipe', 'Add New Ticket': 'Incluir nova permissão', 'Add New Track': 'Adicionar Nova Pista', 'Add New User to Role': 'Adicionar Novo usuário para Função', 'Add New': 'Incluir novo', 'Add Note': 'Incluir nota', 'Add Peer': 'Incluír Par', 'Add Person': 'incluir pessoa', 'Add Photo': 'Incluir Foto', 'Add Population Statistic': 'Incluir População Estatística', 'Add Position': 'Adicionar Posição', 'Add Problem': 'Adicionar Problema', 'Add Question': 'Adicionar Pergunta', 'Add Rapid Assessment': 'Adicionar Avaliação Rápida', 'Add Record': 'Incluir Registro', 'Add Reference Document': 'Incluir documento de referência', 'Add Report': 'Incluir Relatório', 'Add Request': 'Incluir Pedido', 'Add Section': 'Incluir Secção', 'Add Setting': 'Adicionar Definição', 'Add Skill Equivalence': 'Incluir equivalência de habilidades', 'Add Skill Provision': 'Incluir provisão de habilidades', 'Add Solution': 'Incluir Solução', 'Add Staff Type': 'Incluir tipo de equipe', 'Add Staff': 'Incluir equipe', 'Add Subscription': 'Incluir Assinatura', 'Add Subsector': 'Incluir Subsetor', 'Add Survey Answer': 'Incluir resposta de pesquisa', 'Add Survey Question': 'Adicionar pergunta da pesquisa', 'Add Survey Section': 'Incluir seção da pesquisa', 'Add Survey Series': 'Incluir série da pesquisa', 'Add Survey Template': 'Incluir Modelo De Pesquisa', 'Add Team Member': 'Incluir membro', 'Add Team': 'Incluir equipe', 'Add Ticket': 'Adicionar Bilhete', 'Add Training': 'Incluir Treinamento', 'Add Unit': 'Incluir Unidade', 'Add Volunteer Availability': 'Incluir disponibilidade do voluntário', 'Add a Reference Document such as a file, URL or contact person to verify this data. If you do not enter a Reference Document, your email will be displayed instead.': 'Adicionar um documento de referência como um arquivo, URL ou contacto pessoal para verificar esses dados. 
Se você não inserir um documento de referência, seu e-mail será exibido no lugar.', 'Add a Volunteer': 'Incluir um Voluntário', 'Add a new certificate to the catalog.': 'Incluir um novo certificado no catálogo.', 'Add a new competency rating to the catalog.': 'Adicionar uma nova classificação de competência ao catálogo.', 'Add a new course to the catalog.': 'Adicionar um novo curso ao catálogo.', 'Add a new job role to the catalog.': 'Incluir um novo cargo no catálogo.', 'Add a new skill provision to the catalog.': 'Incluir uma nova provisão de habilidade no catálogo.', 'Add a new skill to the catalog.': 'Incluir uma nova habilidade para o catálogo.', 'Add a new skill type to the catalog.': 'Incluir um novo tipo de habilidade no catálogo.', 'Add new Group': 'Adicionar novo grupo', 'Add new Individual': 'Incluir novo indivíduo', 'Add new project.': 'Adicionar novo projeto.', 'Add new staff role.': 'Incluir função de novos funcionários.', 'Add staff members': 'Incluir membros da equipe', 'Add to Bundle': 'Incluir no Pacote Configurável', 'Add to budget': 'Incluir no orçamento', 'Add volunteers': 'Incluir voluntários', 'Add': 'incluir', 'Add/Edit/Remove Layers': 'Incluir/editar/remover camadas', 'Added to Group': 'Adicionado ao Grupo', 'Added to Team': 'Adicionado à Equipe', 'Additional Beds / 24hrs': 'Camas adicionais / 24 horas', 'Address Details': 'Detalhes do Endereço', 'Address Type': 'Tipo de Endereço', 'Address added': 'Endereço incluído', 'Address deleted': 'Endereço excluído', 'Address updated': 'Endereço actualizado', 'Address': 'endereço', 'Addresses': 'Endereços', 'Adequate food and water available': 'Comida e água adequadas disponíveis', 'Adequate': 'adequado', 'Admin Email': 'email do administrador', 'Admin Name': 'nome do administrador', 'Admin Tel': 'Telefone do administrador', 'Administration': 'administração', 'Admissions/24hrs': 'admissões/24 horas', 'Adolescent (12-20)': 'adolescente (12-20)', 'Adolescent participating in coping activities': 'Adolescente participando em actividades de superação', 'Adult (21-50)': 'Adulto (21-50)', 'Adult ICU': 'UTI para adultos', 'Adult Psychiatric': 'Psiquiátrico para adultos', 'Adult female': 'Mulher adulta', 'Adult male': 'Homem adulto', 'Adults in prisons': 'Adultos em prisões', 'Advanced:': 'Avançado:', 'Advisory': 'Aconselhamento', 'After clicking on the button, a set of paired items will be shown one by one. Please select the one solution from each pair that you prefer over the other.': 'Depois de pressionar o botão será mostrado um conjunto de dois elementos, um de cada vez. Por favor selecione a solução de cada par de sua preferência sobre a outra.', 'Age Group': 'Grupo etário', 'Age group does not match actual age.': 'Grupo etário não corresponde à idade real.', 'Age group': 'Grupo etário', 'Aggravating factors': 'Fatores agravantes', 'Agriculture': 'Agricultura', 'Air Transport Service': 'Serviço de Transporte Aéreo', 'Aircraft Crash': 'Queda de Avião', 'Aircraft Hijacking': 'Sequestro de Avião', 'Airport Closure': 'Encerramento de Aeroporto', 'Airspace Closure': 'Encerramento de Espaço Aéreo', 'Alcohol': 'álcool', 'Alert': 'Alerta', 'All Inbound & Outbound Messages are stored here': 'Todas as mensagens enviadas e recebidas são armazenadas aqui', 'All Resources': 'Todos os recursos', 'All data provided by the Sahana Software Foundation from this site is licenced under a Creative Commons Attribution licence. However, not all data originates here. 
Please consult the source field of each entry.': 'Todos os dados fornecidos pela Sahana Software Foundation a partir deste site são licenciados sob uma licença Creative Commons Attribution. No entanto, nem todos os dados se originam aqui. Por favor consulte o campo de origem de cada entrada.', 'All': 'Tudo', 'Allowed to push': 'Permissão para enviar', 'Allows a Budget to be drawn up': 'Permite que um orçamento seja estabelecido', 'Allows authorized users to control which layers are available to the situation map.': 'Permite usuários autorizados a controlar quais camadas estão disponíveis no mapa de situação.', 'Alternative Item Details': 'Detalhes do Item alternativo', 'Alternative Item added': 'Item alternativo incluído', 'Alternative Item deleted': 'Item alternativo excluído', 'Alternative Item updated': 'Item Alternativo atualizado', 'Alternative Item': 'Item Alternativo', 'Alternative Items': 'Itens alternativos', 'Alternative places for studying': 'Locais alternativos para estudo', 'Ambulance Service': 'Serviço de Ambulância', 'An asset must be assigned to a person, site OR location.': 'Um ATIVO deve ser designado a uma pessoa, local ou site.', 'An intake system, a warehouse management system, commodity tracking, supply chain management, procurement and other asset and resource management capabilities.': 'Um sistema de admissão, um sistema de gestão de depósitos, rastreamento de mercadorias, gestão da cadeia de fornecimentos, aquisições e outras capacidades de gerenciamento de ativos e recursos.', 'An item which can be used in place of another item': 'Um item que pode ser utilizado no lugar de outro item', 'Analysis of Completed Surveys': 'Análise das Pesquisas Concluídas', 'Animal Die Off': 'Morte Animal', 'Animal Feed': 'Alimentação Animal', 'Answer Choices (One Per Line)': 'Opções de resposta (uma por linha)', 'Antibiotics available': 'Antibióticos disponíveis', 'Antibiotics needed per 24h': 'Antibióticos necessários por 24h', 'Apparent Age': 'Idade aparente', 'Apparent Gender': 'Género aparente', 'Application Deadline': 'Prazo Final da aplicação', 'Applications': 'Requisições', 'Approve': 'Aprovar', 'Approved': 'aprovado', 'Approver': 'Aprovador', 'Arctic Outflow': 'Efluxo Ártico', 'Area': 'Área', 'Areas inspected': 'Áreas inspecionadas', 'Assessment Details': 'Detalhes da Avaliação', 'Assessment Reported': 'Avaliação Relatada', 'Assessment Summaries': 'Sumário de Avaliações', 'Assessment Summary Details': 'Detalhes do sumário de avaliação', 'Assessment Summary added': 'Sumário de avaliação incluído', 'Assessment Summary deleted': 'Sumário de avaliação apagado', 'Assessment Summary updated': 'Sumário de avaliação atualizado', 'Assessment added': 'Avaliação incluída', 'Assessment admin level': 'Avaliação de nível administrativo', 'Assessment deleted': 'Avaliação excluída', 'Assessment timeline': 'sequência temporal de avaliação', 'Assessment updated': 'Avaliação atualizada', 'Assessment': 'Avaliação', 'Assessments Needs vs. Activities': 'Necessidades de Avaliações vs. 
Atividades', 'Assessments and Activities': 'Avaliações e Atividades', 'Assessments': 'avaliações', 'Assessments:': 'Avaliações', 'Assessor': 'Avaliador', 'Asset Assigned': 'Ativo Designado', 'Asset Assignment Details': 'Detalhes da Designação de Recursos', 'Asset Assignment deleted': 'Designação De ativo excluído', 'Asset Assignment updated': 'Atribuição de Ativo atualizada', 'Asset Assignments': 'Designações de Ativo', 'Asset Details': 'Detalhes do Ativo', 'Asset Log Details': 'Detalhes do Log de ativos', 'Asset Log Empty': 'Log de Ativos vazio', 'Asset Log Entry Added - Change Label': 'Adicionada uma entrada no Log de ativos -Alterar Etiqueta', 'Asset Log Entry deleted': 'Apagada uma entrada no Log de ativos', 'Asset Log Entry updated': 'Atualizada uma entrada no Log de Ativos', 'Asset Log': 'Log de ATIVOS', 'Asset Management': 'gerenciamento de recursos', 'Asset Number': 'número do recurso', 'Asset added': 'Ativo Incluído', 'Asset deleted': 'ativo excluído', 'Asset removed': 'Ativo Removido', 'Asset updated': 'recurso atualizado', 'Asset': 'Recurso', 'Assets are resources which are not consumable but are expected back, so they need tracking.': 'Os ativos são recursos que não são consumíveis e serão devolvidos, portanto precisam de rastreamento.', 'Assets': 'recursos', 'Assign Asset': 'designar recurso', 'Assign Group': 'Designar Grupo', 'Assign Staff': 'Atribuir Equipe', 'Assign to Org.': 'Designar para Org.', 'Assign to Organization': 'Atribuir para Organização', 'Assign to Person': 'Atribuir uma Pessoa', 'Assign to Site': 'Atribuir um Site', 'Assign': 'Designar', 'Assigned By': 'Designado por', 'Assigned To': 'Designado Para', 'Assigned to Organization': 'Designado para Organização', 'Assigned to Person': 'Designado para a Pessoa', 'Assigned to Site': 'Designado para o Site', 'Assigned to': 'Designado para', 'Assigned': 'Designado', 'Assignments': 'Designações', 'At/Visited Location (not virtual)': 'Em/Visitou Local (não virtual)', 'Attend to information sources as described in <instruction>': 'Participar de fontes de informação, conforme descrito em<instruction>', 'Attribution': 'Atribuição', 'Author': 'autor', 'Availability': 'Disponibilidade', 'Available Alternative Inventories': 'Alternativas de Inventário disponíveis', 'Available Alternative Inventory Items': 'Itens alternativos de Inventário disponíveis', 'Available Beds': 'camas disponíveis', 'Available Inventories': 'Inventários disponíveis', 'Available Inventory Items': 'Itens de inventário disponíveis', 'Available Messages': 'Mensagens disponíveis', 'Available Records': 'Registros disponíveis', 'Available databases and tables': 'Banco de Dados e Tabelas disponíveis', 'Available for Location': 'Disponível para locação', 'Available from': 'disponível de', 'Available in Viewer?': 'Disponível no visualizador?', 'Available until': 'Disponível até', 'Avoid the subject event as per the <instruction>': 'Evitar o assunto do evento de acordo com a', 'Background Color for Text blocks': 'Cor de segundo plano para blocos de texto', 'Background Color': 'Cor de Plano de Fundo', 'Baldness': 'Calvície', 'Bank/micro finance': 'banco/micro finanças', 'Barricades are needed': 'Barricadas são necessárias', 'Base Layer?': 'Camada De Base?', 'Base Location': 'Local da Base', 'Base Site Set': 'Conjunto de Site básico', 'Baseline Data': 'Dados básicos', 'Baseline Number of Beds': 'Numero de camadas base de camas', 'Baseline Type Details': 'Detalhes de Tipo de Linha Base', 'Baseline Type added': 'Tipo de Linha Base adicionado', 'Baseline Type 
deleted': 'Tipo de Linha Base removido', 'Baseline Type updated': 'Tipo de Linha Base actualizado', 'Baseline Type': 'Tipo de Linha Base', 'Baseline Types': 'Tipos de Linha Base', 'Baseline added': 'Camada Base incluída', 'Baseline deleted': 'Camada Base Excluída', 'Baseline number of beds of that type in this unit.': 'Numero de camadas base de camas desse tipo nesta unidade.', 'Baseline updated': 'Linha Base actulizada', 'Baselines Details': 'Detalhes de Camadas Base', 'Baselines': 'Camadas Base', 'Basic Assessment Reported': 'Avaliação Básica Relatada', 'Basic Assessment': 'Avaliação Básica', 'Basic Details': 'Detalhes Básicos', 'Basic reports on the Shelter and drill-down by region': 'Relatórios básicos sobre o Abrigo e abertura por região', 'Baud rate to use for your modem - The default is safe for most cases': 'Taxa de transmissão para ser usada pelo seu modem - O padrão é seguro para a maioria dos casos', 'Baud': 'Transmissão', 'Beam': 'feixe', 'Bed Capacity per Unit': 'Capacidade cama por Unidade', 'Bed Capacity': 'Capacidade de leitos', 'Bed Type': 'Tipo de cama', 'Bed type already registered': 'Tipo de cama já registrado', 'Below ground level': 'Abaixo do nível do solo', 'Beneficiary Type': 'Tipo de beneficiário', 'Biological Hazard': 'Risco Biológico', 'Biscuits': 'Biscoitos', 'Blizzard': 'Nevasca', 'Blood Type (AB0)': 'Tipo sanguíneo (AB0)', 'Blowing Snow': 'Soprando neve', 'Boat': 'Barco', 'Bodies found': 'Corpos encontrados', 'Bodies recovered': 'corpos recuperados', 'Body Recovery Request': 'Pedido de recuperação de corpos', 'Body Recovery Requests': 'Pedidos de recuperação de corpos', 'Body': 'corpo', 'Bomb Explosion': 'Explosão de bomba', 'Bomb Threat': 'Ameaça de bomba', 'Bomb': 'Bomba', 'Border Color for Text blocks': 'Cor da borda para blocos de texto', 'Bounding Box Insets': 'Delimitadora Inserções Caixa', 'Bounding Box Size': 'CAIXA delimitadora Tamanho', 'Brand Details': 'Detalhes da Marca', 'Brand added': 'Marca incluída', 'Brand deleted': 'Marca excluída', 'Brand updated': 'marca atualizada', 'Brand': 'Marca', 'Brands': 'marcas', 'Bricks': 'Tijolos', 'Bridge Closed': 'PONTE FECHADA', 'Bucket': 'Balde', 'Buddhist': 'Budista', 'Budget Details': 'Detalhes de Orçamento', 'Budget Updated': 'Orçamento Atualizado', 'Budget added': 'Orçamento incluído', 'Budget deleted': 'Orçamento excluído', 'Budget updated': 'Orçamento atualizado', 'Budget': 'Orçamento', 'Budgeting Module': 'Módulo de Orçamento', 'Budgets': 'Orçamentos', 'Buffer': 'buffer', 'Bug': 'erro', 'Building Assessments': 'Avaliações de construção', 'Building Collapsed': 'Construção Fechada', 'Building Name': 'Nome do edifício', 'Building Safety Assessments': 'Regras de Segurança do Edifício', 'Building Short Name/Business Name': 'Nome curto/Nome completo do Edifício', 'Building or storey leaning': 'Edifício ou andar em inclinação', 'Built using the Template agreed by a group of NGOs working together as the': 'Construído de acordo com o formulário acordado por um grupo de ONGs', 'Bulk Uploader': 'Carregador em massa', 'Bundle Contents': 'Conteúdo do Pacote', 'Bundle Details': 'Detalhes do Pacote', 'Bundle Updated': 'Pacote configurável ATUALIZADO', 'Bundle added': 'Pacote incluído', 'Bundle deleted': 'Pacote Excluído', 'Bundle updated': 'Pacote atualizado', 'Bundle': 'Pacote', 'Bundles': 'Pacotes', 'Burn ICU': 'Queimar ICU', 'Burn': 'Gravar', 'Burned/charred': 'Queimados/carbonizados', 'By Facility': 'Por Facilidade', 'By Inventory': 'Por Inventário', 'By Person': 'Por pessoa', 'By Site': 'Por Site', 'CBA Women': 
'CBA Mulheres', 'CSS file %s not writable - unable to apply theme!': 'Arquivo CSS %s não é gravável - Impossível aplicar o tema!', 'Calculate': 'calcular', 'Camp Coordination/Management': 'Coordenação/Gestão de Acampamento', 'Camp Details': 'Detalhes do Alojamento', 'Camp Service Details': 'Detalhe do Serviço de Campo', 'Camp Service added': 'Serviço de Alojamento incluído', 'Camp Service deleted': 'Serviço de Alojamento excluído', 'Camp Service updated': 'Serviço de campo atualizado', 'Camp Service': 'Serviço de Alojamento', 'Camp Services': 'Serviço de campo', 'Camp Type Details': 'Detalhes do tipo de campo', 'Camp Type added': 'Tipo de Campo incluso.', 'Camp Type deleted': 'Tipo de campo excluído.', 'Camp Type updated': 'Tipo De acampamento atualizado', 'Camp Type': 'Tipo de Campo', 'Camp Types and Services': 'Tipos e serviços de acampamentos', 'Camp Types': 'Tipos de acampamento', 'Camp added': 'Alojamento incluído', 'Camp deleted': 'Alojamento excluído', 'Camp updated': 'Acampamento atualizado', 'Camp': 'Acampamento', 'Camps': 'Alojamentos', 'Can only disable 1 record at a time!': 'Pode desativar apenas 1 registro por vez!', 'Cancel Log Entry': 'Cancelar Registro De Entrada', 'Cancel Shipment': 'Cancelar Embarque', 'Cancel': 'Cancelar', 'Canceled': 'cancelado', 'Candidate Matches for Body %s': 'Candidato Corresponde ao Corpo %s', 'Canned Fish': 'Conservas de Peixe', 'Cannot be empty': 'Não pode ser vazio', 'Cannot disable your own account!': 'Você não pode desativar sua própria conta!', 'Capacity (Max Persons)': 'Capacidade (Máximo De pessoas)', 'Capture Information on Disaster Victim groups (Tourists, Passengers, Families, etc.)': 'Captura informações sobre grupos de vítimas de desastre (Turistas, Passageiros, Famílias, etc.).', 'Capture Information on each disaster victim': 'Captura informações sobre cada vítima de desastre', 'Capturing organizational information of a relief organization and all the projects they have in the region': 'Capturando informações organizacionais de uma organização de ajuda e todos os projetos que têm na região', 'Capturing the projects each organization is providing and where': 'Capturando os projetos que cada organização está fornecendo e onde', 'Cardiology': 'Cardiologia', 'Cassava': 'Mandioca', 'Casual Labor': 'Trabalho Casual', 'Casualties': 'Vítimas', 'Catalog Details': 'Detalhes do Catálogo', 'Catalog Item added': 'Item incluído no catálogo', 'Catalog Item deleted': 'Item do catálogo excluído', 'Catalog Item updated': 'Item do catálogo atualizado', 'Catalog Item': 'Item do catálogo', 'Catalog Items': 'Itens do Catálogo', 'Catalog added': 'Catálogo Incluído', 'Catalog deleted': 'Catálogo excluído', 'Catalog updated': 'Catálogo Atualizado', 'Catalog': 'catálogo', 'Catalogs': 'Catálogos', 'Categories': 'Categorias', 'Category': 'Categoria', 'Ceilings, light fixtures': 'Tetos, luminárias', 'Central point to record details on People': 'Ponto Central para registrar detalhes sobre pessoas', 'Certificate Catalog': 'Catálogo de Certificados', 'Certificate Details': 'Detalhes do Certificado', 'Certificate Status': 'Status do Certificado', 'Certificate added': 'Certificado incluído', 'Certificate deleted': 'Certificado Removido', 'Certificate updated': 'Certificado Actualizado', 'Certificates': 'Certificados', 'Certification Details': 'Detalhes da Certificação', 'Certification added': 'Certificação incluída', 'Certification deleted': 'Certificação excluída', 'Certification updated': 'Certificação atualizada', 'Certification': 'Certificação', 'Certifications': 
'Certificações', 'Certifying Organization': 'Certificação da Organização', 'Change Password': 'Alterar Senha', 'Check Request': 'Verificar Pedido', 'Check for errors in the URL, maybe the address was mistyped.': 'Verifique se há erros na URL, talvez o endereço foi digitado incorretamente.', 'Check if the URL is pointing to a directory instead of a webpage.': 'Verifique se a URL está apontando para um diretório em vez de uma página da Web.', 'Check outbox for the message status': 'Outbox para verificar o status da mensagem', 'Check to delete': 'Verificar para Excluir', 'Check': 'Verifique', 'Check-in': 'Registrar Entrada', 'Check-out': 'Registrar Saída', 'Checked': 'verificado', 'Checklist created': 'Lista de verificação criada', 'Checklist deleted': 'Lista de verificação excluída', 'Checklist of Operations': 'Lista de Verificação das Operações', 'Checklist updated': 'Lista de verificação atualizado', 'Checklist': 'lista de verificação', 'Chemical Hazard': 'Risco Químico', 'Chemical, Biological, Radiological, Nuclear or High-Yield Explosive threat or attack': 'Ameaça ou ataque Químico, Biológico, Radiológico, Nuclear ou de alto concentração Explosiva', 'Chicken': 'Frango', 'Child (2-11)': 'Criança (2-11)', 'Child (< 18 yrs)': 'Criança (< 18 anos)', 'Child Abduction Emergency': 'Emergência de Rapto De Criança', 'Child headed households (<18 yrs)': 'Famílias chefiadas por Filho (<18 anos)', 'Child': 'Criança', 'Children (2-5 years)': 'Crianças (2 a 5 anos)', 'Children (5-15 years)': 'Crianças (5 a 15 anos)', 'Children (< 2 years)': 'Crianças (< 2 anos)', 'Children in adult prisons': 'Crianças nas prisões para adultos', 'Children in boarding schools': 'Crianças em internatos', 'Children in homes for disabled children': 'Crianças em lares para crianças deficientes', 'Children in juvenile detention': 'Crianças em detenção juvenil', 'Children in orphanages': 'Crianças nos orfanatos', 'Children living on their own (without adults)': 'Crianças vivendo por conta própria (sem adultos)', 'Children not enrolled in new school': 'Crianças não matriculadas em Nova Escola', 'Children orphaned by the disaster': 'Crianças órfãs pela catástrofe', 'Children separated from their parents/caregivers': 'Crianças SEPARADAS de seus pais/responsáveis', 'Children that have been sent to safe places': 'Crianças que foram enviadas para locais seguros', 'Children who have disappeared since the disaster': 'Crianças que desapareceram desde o desastre', 'Chinese (Taiwan)': 'Chinês (Taiwan)', 'Cholera Treatment Capability': 'Capacidade de Tratamento da Cólera', 'Cholera Treatment Center': 'Centro de Tratamento de Cólera', 'Cholera Treatment': 'Tratamento da cólera', 'Cholera-Treatment-Center': 'Centro de tratamento de cólera', 'Choose a new posting based on the new evaluation and team judgement. Severe conditions affecting the whole building are grounds for an UNSAFE posting. Localised Severe and overall Moderate conditions may require a RESTRICTED USE. Place INSPECTED placard at main entrance. Post all other placards at every significant entrance.': 'Escolha uma nova alocação baseada na nova avaliação e julgamento do time. Condições severas que afetem o prédio inteiro são base para uma colocação INSEGURA. Grave localizada e no geral condições moderadas podem exigir um USO RESTRITO. Local INSPECCIONADO cartaz na entrada principal. 
Coloque todos os outros cartazes em cada entrada importante.', 'Christian': 'Cristão', 'Church': 'Igreja', 'Circumstances of disappearance, other victims/witnesses who last saw the missing person alive.': 'Circunstâncias do desaparecimento, outras vítimas/testemunhas que viram pela última vez a pessoa desaparecida viva.', 'City': 'Cidade', 'Civil Emergency': 'Emergência Civil', 'Cladding, glazing': 'Revestimentos, vidros', 'Click on the link %(url)s to reset your password': 'Clique no link %(url)s para Reconfigurar sua senha', 'Click on the link %(url)s to verify your email': 'Clique no link %(url)s para verificar seu e-mail', 'Clinical Laboratory': 'Laboratório clínico', 'Clinical Operations': 'operações clínicas', 'Clinical Status': 'estado clínico', 'Closed': 'fechado', 'Clothing': 'vestuário', 'Cluster Details': 'Detalhes do Grupo', 'Cluster Distance': 'Distância entre Grupos', 'Cluster Subsector Details': 'Detalhes do sub-setor do cluster', 'Cluster Subsector added': 'Subsector de Grupos incluído', 'Cluster Subsector deleted': 'Subsector de Grupos removido', 'Cluster Subsector updated': 'Sub-setores do cluster atualizado', 'Cluster Subsector': 'Subsector de Grupos', 'Cluster Subsectors': 'Sub-setores do cluster', 'Cluster Threshold': 'Limite do Cluster', 'Cluster added': 'Agrupamento adicionado', 'Cluster deleted': 'Grupo removido', 'Cluster updated': 'Cluster atualizado', 'Cluster': 'agrupamento', 'Cluster(s)': 'Grupo(s)', 'Clusters': 'clusters', 'Code': 'Código', 'Cold Wave': 'onda fria', 'Collapse, partial collapse, off foundation': 'Colapso, colapso parcial, fora da fundação', 'Collective center': 'Centro coletivo', 'Color for Underline of Subheadings': 'Cor para sublinhado de subtítulos', 'Color of Buttons when hovering': 'Cor dos botões ao passar o cursor', 'Color of bottom of Buttons when not pressed': 'Cor da parte inferior dos botões quando não for pressionado', 'Color of bottom of Buttons when pressed': 'Cor da parte de baixo dos botões quando pressionados', 'Color of dropdown menus': 'Cor dos menus suspensos', 'Color of selected Input fields': 'Cor dos campos de entrada selecionados', 'Color of selected menu items': 'cor dos itens selecionados do menu', 'Column Choices (One Per Line': 'Opções de coluna (uma por linha)', 'Columns, pilasters, corbels': 'Colunas, pilastras, cavaletes', 'Combined Method': 'Método combinado', 'Come back later. Everyone visiting this site is probably experiencing the same problem as you.': 'Volte mais tarde. 
Todos que visitam este site estão, provavelmente, enfrentando o mesmo problema que você.', 'Come back later.': 'Volte mais tarde.', 'Comments': 'Comentários', 'Commercial/Offices': 'Comercial/Escritórios', 'Commit Date': 'Data de Consolidação', 'Commit from %s': 'Consolidação de %s', 'Commit': 'Consolidar', 'Commiting a changed spreadsheet to the database': 'Consolidando uma planilha alterada no banco de dados', 'Commitment Added': 'Compromisso Incluído', 'Commitment Canceled': 'Compromisso cancelado', 'Commitment Details': 'Detalhes do compromisso', 'Commitment Item Details': 'Detalhes do item de compromisso', 'Commitment Item added': 'Item de compromisso incluído', 'Commitment Item deleted': 'Item do compromisso excluído', 'Commitment Item updated': 'Item de compromisso atualizado', 'Commitment Item': 'Item do compromisso', 'Commitment Items': 'Itens de compromisso', 'Commitment Status': 'Status do Compromisso', 'Commitment Updated': 'Compromisso Atualizado', 'Commitment': 'Comprometimento', 'Commitments': 'Compromissos', 'Committed By': 'Comprometido por', 'Committed': 'Comprometido', 'Committing Inventory': 'Confirmando Inventário', 'Communication problems': 'Problemas de Comunicação', 'Community Centre': 'Centro Comunitário', 'Community Health Center': 'Centro Comunitário de Saúde', 'Community Member': 'Membro da Comunidade', 'Competencies': 'Competências', 'Competency Details': 'Detalhes da Competência', 'Competency Rating Catalog': 'Catálogo de Classificação de Competências', 'Competency Rating Details': 'Detalhes da classificação de competências', 'Competency Rating added': 'Classificação de Habilidades incluída', 'Competency Rating deleted': 'Classificação de competência excluída', 'Competency Rating updated': 'Atualização da classificação de competências', 'Competency Ratings': 'Classificação de competências', 'Competency added': 'Competência incluída', 'Competency deleted': 'Competência excluída', 'Competency updated': 'Competência atualizada', 'Competency': 'Competência', 'Complete': 'Concluir', 'Completed': 'Concluído', 'Complexion': 'Compleição', 'Compose': 'Redigir', 'Compromised': 'Comprometida', 'Concrete frame': 'Quadro concreto', 'Concrete shear wall': 'Parede de cisalhamento de concreto', 'Condition': 'Condição', 'Configurations': 'Configurações', 'Configure Run-time Settings': 'Configurar as configurações de tempo de execução', 'Confirm Shipment Received': 'Confirmar Remessa Recebida', 'Confirmed': 'Confirmado', 'Confirming Organization': 'Confirmando Organização', 'Conflict Details': 'Detalhes Do conflito', 'Conflict Resolution': 'Resolução de Conflito', 'Consignment Note': 'Nota de Remessa', 'Constraints Only': 'Somente restrições', 'Consumable': 'Consumível', 'Contact Data': 'Dados de contato', 'Contact Details': 'Detalhes do contato', 'Contact Info': 'Informações de Contato', 'Contact Information Added': 'Informação de contato incluída', 'Contact Information Deleted': 'Informação de contato excluída', 'Contact Information Updated': 'Informações de contato atualizadas', 'Contact Information': 'Informações de Contato', 'Contact Method': 'Método de Contato', 'Contact Name': 'Nome do contato', 'Contact Person': 'Pessoa de Contato', 'Contact Phone': 'Telefone para Contato', 'Contact details': 'Detalhes do contato', 'Contact information added': 'Informações de contato incluídas', 'Contact information deleted': 'Informações de contato excluídas', 'Contact information updated': 'Informações de contato atualizadas', 'Contact person(s) in case of news or further questions (if different from reporting person). 
Include telephone number, address and email as available.': 'Pessoa(s) a contactar em caso de notícias ou mais perguntas (se for diferente da pessoa que reportou). Incluir número de telefone, endereço e correio electrónico se disponível.', 'Contact us': 'Fale Conosco', 'Contact': 'contato', 'Contacts': 'contatos', 'Contents': 'Conteúdo', 'Contributor': 'Contribuidor', 'Conversion Tool': 'Ferramenta de Conversão', 'Cooking NFIs': 'Cozinhando NFIs', 'Cooking Oil': 'Cozinhando Óleo', 'Coordinate Conversion': 'COORDENAR a Conversão', 'Coping Activities': 'Atividades de lida', 'Copy': 'copiar', 'Corn': 'Milho', 'Cost Type': 'Tipo de custo', 'Cost per Megabyte': 'Custo por megabyte', 'Cost per Minute': 'Custo por Minuto', 'Country of Residence': 'País de Residência', 'Country': 'País', 'County': 'Município', 'Course Catalog': 'Catálogo de Cursos', 'Course Certificate Details': 'Detalhes do Certificado do Curso', 'Course Certificate added': 'Certificado do Curso adicionado', 'Course Certificate deleted': 'Certificado do Curso excluído', 'Course Certificate updated': 'Certificado do Curso atualizado', 'Course Certificates': 'Certificados de Curso', 'Course Details': 'Detalhes do curso', 'Course added': 'Curso incluído', 'Course deleted': 'Curso excluído', 'Course updated': 'Curso atualizado', 'Course': 'Curso', 'Courses': 'Cursos', 'Create & manage Distribution groups to receive Alerts': 'Criar & gerenciar grupos de distribuição de receber alertas', 'Create Activity Report': 'Criar Relatório de atividade', 'Create Activity Type': 'Criar tipo de atividade', 'Create Activity': 'Criar Atividade', 'Create Assessment': 'Criar Avaliação', 'Create Asset': 'Criar Ativo', 'Create Bed Type': 'Criar Tipo De Cama', 'Create Brand': 'Criar Marca', 'Create Budget': 'Criar Orçamento', 'Create Catalog Item': 'Criar Item de Catálogo', 'Create Catalog': 'Criar Catálogo', 'Create Certificate': 'Criar certificado', 'Create Checklist': 'Criar Lista de Verificação', 'Create Cholera Treatment Capability Information': 'Criar Informação sobre capacidade para tratamento de cólera', 'Create Cluster Subsector': 'Criar Subsetor de Cluster', 'Create Cluster': 'Criar cluster', 'Create Competency Rating': 'Criar Classificação da Competência', 'Create Contact': 'Criar contato', 'Create Course': 'Criar curso', 'Create Dead Body Report': 'Criar Relatório de Cadáver', 'Create Event': 'Criar Evento', 'Create Facility': 'Criar Recurso', 'Create Feature Layer': 'Criar camada de recurso', 'Create Group Entry': 'Criar Grupo De Entrada', 'Create Group': 'Criar Grupo', 'Create Hospital': 'Criar Hospital', 'Create Identification Report': 'Criar Identificação Relatório', 'Create Impact Assessment': 'Criar Avaliação de Impacto', 'Create Incident Report': 'Criar relatório de incidente', 'Create Incident': 'Criar Incidente', 'Create Item Category': 'Criar categoria de item', 'Create Item Pack': 'Criar pacote de itens', 'Create Item': 'Criar novo item', 'Create Kit': 'Criar novo Kit', 'Create Layer': 'Criar Camada', 'Create Location': 'Criar Local', 'Create Map Configuration': 'Criar Mapa de configuração', 'Create Marker': 'Criar Marcador', 'Create Member': 'Criar Membro', 'Create Mobile Impact Assessment': 'Criar Avaliação de Impacto Movel', 'Create Office': 'Criar Escritório', 'Create Organization': 'Criar Organização', 'Create Personal Effects': 'Criar efeitos pessoais', 'Create Project': 'Criar projeto', 'Create Projection': 'Criar Projeção', 'Create Rapid Assessment': 'Criar Avaliação Rápida', 'Create Report': 'Criar Relatório', 'Create 
Request': 'Criar Pedido', 'Create Resource': 'Criar Recurso', 'Create River': 'Criar Rio', 'Create Role': 'Criar Função', 'Create Room': 'Criar Sala', 'Create Scenario': 'Criar cenário', 'Create Sector': 'Criar Sector', 'Create Service Profile': 'Criar Perfil de Serviço', 'Create Shelter Service': 'Criar Serviço de Abrigo', 'Create Shelter Type': 'Criar Tipo de Abrigo', 'Create Shelter': 'Criar Abrigo', 'Create Skill Type': 'Criar Tipo de Habilidade', 'Create Skill': 'Criar Habilidade', 'Create Staff Member': 'Criar membro da equipe', 'Create Status': 'Criar Status', 'Create Task': 'Criar Tarefa', 'Create Theme': 'Criar Tema', 'Create User': 'Criar Usuário', 'Create Volunteer': 'Criar Voluntário', 'Create Warehouse': 'Criar Armazém', 'Create a Person': 'Criar uma pessoa', 'Create a group entry in the registry.': 'Criar uma entrada de grupo no registro.', 'Create, enter, and manage surveys.': 'Criar, digitar e gerenciar pesquisas.', 'Creation of Surveys': 'Criação de Pesquisas', 'Credential Details': 'Detalhes da Credencial', 'Credential added': 'Credencial incluída', 'Credential deleted': 'Credencial Excluída', 'Credential updated': 'Credencial ATUALIZADA', 'Credentialling Organization': 'Organização Credenciadora', 'Credentials': 'credenciais', 'Credit Card': 'Cartão de crédito', 'Crime': 'crime', 'Criteria': 'Critérios', 'Currency': 'moeda', 'Current Entries': 'Entradas Atuais', 'Current Group Members': 'Membros do Grupo atual', 'Current Identities': 'Identidades atuais', 'Current Location': 'Posição Atual', 'Current Log Entries': 'Entradas de Log atuais', 'Current Memberships': 'Participações atuais', 'Current Notes': 'Notas atuais', 'Current Records': 'Registros atuais', 'Current Registrations': 'Registros atuais', 'Current Status': 'Status atual', 'Current Team Members': 'Os atuais membros da equipe', 'Current Twitter account': 'Conta atual no Twitter', 'Current community priorities': 'Atuais prioridades da comunidade', 'Current general needs': 'Atuais necessidades gerais', 'Current greatest needs of vulnerable groups': 'Maiores necessidades atuais dos grupos vulneráveis', 'Current health problems': 'Problemas de saúde atuais', 'Current number of patients': 'Número atual de pacientes', 'Current problems, categories': 'Problemas atuais, categorias', 'Current problems, details': 'Problemas atuais, detalhes', 'Current request': 'Pedido atual', 'Current response': 'Resposta atual', 'Current session': 'Sessão atual', 'Currently no Certifications registered': 'Nenhuma certificação registrada atualmente', 'Currently no Competencies registered': 'Nenhuma competência registrada atualmente', 'Currently no Course Certificates registered': 'Nenhum Certificado de Curso registrado atualmente', 'Currently no Credentials registered': 'Nenhuma credencial registrada atualmente', 'Currently no Missions registered': 'Nenhuma missão registrada atualmente', 'Currently no Skill Equivalences registered': 'Nenhuma equivalência de habilidade registrada atualmente', 'Currently no Trainings registered': 'Atualmente não há treinamentos registrados', 'Currently no entries in the catalog': 'Nenhuma entrada no catálogo atualmente', 'Custom Database Resource (e.g., anything defined as a resource in Sahana)': 'Banco de Dados customizado de Recursos (por exemplo, qualquer coisa definida como recurso no Sahana)', 'DNA Profile': 'Perfil de DNA', 'DNA Profiling': 'Perfil de DNA', 'DVI Navigator': 'Navegador DVI', 'Dam Overflow': 'Transbordamento de Barragem', 'Damage': 'dano', 'Dangerous Person': 'Pessoa perigosa', 'Dashboard': 'Painel', 'Data 
uploaded': 'Dados carregados', 'Data': 'Dados', 'Database': 'Banco de Dados', 'Date Available': 'Data Disponível', 'Date Received': 'Data do recebimento', 'Date Requested': 'Data do pedido', 'Date Required': 'Data Necessária', 'Date Sent': 'Data de Envio', 'Date Until': 'Data Até', 'Date and Time': 'Data e Hora', 'Date and time this report relates to.': 'Data e hora relacionadas a este relatório.', 'Date of Birth': 'Data de Nascimento', 'Date of Latest Information on Beneficiaries Reached': 'Data da última informação sobre Beneficiários Alcançados', 'Date of Report': 'Data do relatório', 'Date': 'Data', 'Date/Time of Find': 'Data/hora da descoberta', 'Date/Time of disappearance': 'Data/hora do desaparecimento', 'Date/Time when found': 'Data/hora quando foi encontrado', 'Date/Time when last seen': 'Data/hora em que foi visto pela última vez', 'Date/Time': 'data/hora', 'De-duplicator': 'Anti duplicador', 'Dead Body Details': 'Detalhes do Cadáver', 'Dead Body Reports': 'Relatórios de Cadáver', 'Dead Body': 'Cadáver', 'Dead body report added': 'Relatório de cadáver incluso.', 'Dead body report deleted': 'Relatório de cadáver excluído.', 'Dead body report updated': 'Relatório de cadáver atualizado', 'Deaths in the past 24h': 'Mortes nas últimas 24 horas', 'Deaths/24hrs': 'Mortes/24hrs', 'Decimal Degrees': 'Graus decimais', 'Decision': 'Decisão', 'Decomposed': 'Decomposto', 'Default Height of the map window.': 'Altura Padrão da janela do mapa.', 'Default Map': 'Mapa padrão', 'Default Marker': 'Marcador Padrão', 'Default Width of the map window.': 'Largura padrão da janela do mapa.', 'Default synchronization policy': 'Política de sincronização padrão', 'Defecation area for animals': 'Área de defecação para animais', 'Define Scenarios for allocation of appropriate Resources (Human, Assets & Facilities).': 'Definir cenários para alocação adequada de recursos (humanos, Ativos & instalações).', 'Defines the icon used for display of features on handheld GPS.': 'Define o ícone utilizado para exibição de recursos no GPS portátil.', 'Defines the icon used for display of features on interactive map & KML exports.': 'Define o ícone utilizado para exibição de recursos no mapa interativo & exportações KML.', 'Defines the marker used for display & the attributes visible in the popup.': 'Define o marcador utilizado para exibir & os atributos visíveis no pop-up.', 'Degrees must be a number between -180 and 180': 'Os graus devem ser um número entre -180 e 180', 'Dehydration': 'Desidratação', 'Delete Alternative Item': 'EXCLUIR Item Alternativo', 'Delete Assessment Summary': 'Excluir Resumo da Avaliação', 'Delete Assessment': 'Excluir Avaliação', 'Delete Asset Assignment': 'Excluir o recurso designado', 'Delete Asset Log Entry': 'Excluir entrada de log de ativo', 'Delete Asset': 'Excluir Ativo', 'Delete Baseline Type': 'apagar tipo de linha base', 'Delete Baseline': 'apagar linha base', 'Delete Brand': 'apagar marca', 'Delete Budget': 'apagar orçamento', 'Delete Bundle': 'apagar pacote', 'Delete Catalog Item': 'apagar item do catálogo', 'Delete Catalog': 'Excluir o Catálogo', 'Delete Certificate': 'Excluir Certificado', 'Delete Certification': 'Excluir Certificação', 'Delete Cluster Subsector': 'EXCLUIR Cluster Subsector', 'Delete Cluster': 'Excluir Cluster', 'Delete Commitment Item': 'Excluir Item de Compromisso', 'Delete Commitment': 'Excluir Compromisso', 'Delete Competency Rating': 'Excluir Classificação da Competência', 'Delete Competency': 'Excluir Competência', 'Delete Contact Information': 'Excluir 
Informações de Contato', 'Delete Course Certificate': 'Excluir Certificado do Curso', 'Delete Course': 'Excluir Curso', 'Delete Credential': 'Excluir Credencial', 'Delete Document': 'Excluir documento', 'Delete Donor': 'Excluir Dador', 'Delete Entry': 'Excluir Entrada', 'Delete Event': 'Excluir Evento', 'Delete Feature Layer': 'Excluir Camada de Componentes', 'Delete Group': 'Excluir Grupo', 'Delete Hospital': 'Excluir Hospital', 'Delete Image': 'Excluir Imagem', 'Delete Impact Type': 'Excluir Tipo De Impacto', 'Delete Impact': 'Excluir Impacto', 'Delete Incident Report': 'Excluir Relatório de Incidentes', 'Delete Inventory Item': 'Excluir Item De Inventário', 'Delete Item Category': 'Excluir categoria de Itens', 'Delete Item Pack': 'Excluir Pacote de Itens', 'Delete Item': 'Excluir Item', 'Delete Job Role': 'Excluir Cargo', 'Delete Key': 'Tecla de exclusão', 'Delete Kit': 'Excluir Kit', 'Delete Layer': 'Excluir Camada', 'Delete Level 1 Assessment': 'Excluir Nível 1 Avaliação', 'Delete Level 2 Assessment': 'Excluir Nível 2 Avaliação', 'Delete Location': 'Excluir locação', 'Delete Map Configuration': 'Excluir Mapa de configuração', 'Delete Marker': 'Excluir Marcador', 'Delete Membership': 'Excluir membro', 'Delete Message': 'Excluir mensagem', 'Delete Mission': 'Excluir Missão', 'Delete Need Type': 'Excluir tipos de necessidades', 'Delete Need': 'Excluir necessidades', 'Delete Office': 'Excluir escritório', 'Delete Organization': 'Excluir organização', 'Delete Peer': 'Excluir par', 'Delete Person': 'excluir pessoa', 'Delete Photo': 'Excluir Foto', 'Delete Population Statistic': 'Excluir População Estatística', 'Delete Position': 'Excluir Posição', 'Delete Project': 'Excluir Projeto', 'Delete Projection': 'Excluir Projeção', 'Delete Rapid Assessment': 'Excluir Avaliação Rápida', 'Delete Received Item': 'Excluir Item Recebido', 'Delete Received Shipment': 'Excluir Embarque Recebido', 'Delete Record': 'Excluir Registro', 'Delete Report': 'Excluir Relatório', 'Delete Request Item': 'Excluir item de solicitação', 'Delete Request': 'Excluir Solicitação', 'Delete Resource': 'Excluir Recurso', 'Delete Room': 'Excluir Sala', 'Delete Scenario': 'Excluir Cenário', 'Delete Section': 'Excluir seção', 'Delete Sector': 'Excluir Setor', 'Delete Sent Item': 'Excluir Item Enviado', 'Delete Sent Shipment': 'Excluir Embarque Enviado', 'Delete Service Profile': 'Excluir perfil de serviço', 'Delete Setting': 'Excluir Definição', 'Delete Skill Equivalence': 'Excluir equivalência de habilidade', 'Delete Skill Provision': 'Excluir Provisão de Habilidade', 'Delete Skill Type': 'Excluir Tipo de Habilidade', 'Delete Skill': 'Excluir habilidade', 'Delete Staff Type': 'Excluir Tipo De Equipe', 'Delete Status': 'Excluir Posição/Estado', 'Delete Subscription': 'Excluir assinatura', 'Delete Subsector': 'Excluir subsetor', 'Delete Survey Answer': 'Excluir reposta da pesquisa', 'Delete Survey Question': 'Excluir pergunta da pesquisa', 'Delete Survey Section': 'Excluir seção da pesquisa', 'Delete Survey Series': 'Excluir série da pesquisa', 'Delete Survey Template': 'Excluir modelo da pesquisa', 'Delete Training': 'Excluir Treinamento', 'Delete Unit': 'Excluir Unidade', 'Delete User': 'Excluir usuário', 'Delete Volunteer': 'Excluir Voluntário', 'Delete Warehouse': 'Excluír Armazém', 'Delete from Server?': 'Excluir do Servidor?', 'Delete': 'Excluir', 'Delphi Decision Maker': 'tomador de decisão Delphi', 'Demographic': 'Demográfico', 'Demonstrations': 'Demonstrações', 'Dental Examination': 'Exame Dentário', 'Dental Profile': 
'Perfil Dentário', 'Describe the condition of the roads to your hospital.': 'Descreva as condições da estrada até o seu hospital.', 'Describe the procedure which this record relates to (e.g. "medical examination")': 'Descreva o procedimento ao qual este registro está relacionado (Ex: "exame médico")', 'Description of Contacts': 'Descrição dos Contatos', 'Description of defecation area': 'Descrição da área de defecação', 'Description of drinking water source': 'Descrição da fonte de água potável', 'Description of sanitary water source': 'Descrição da fonte de água sanitária', 'Description of water source before the disaster': 'Descrição da fonte de água antes do desastre', 'Description': 'Descrição', 'Descriptive Text (e.g., Prose, etc)': 'Texto Descritivo (por exemplo, Prosa, etc.)', 'Desire to remain with family': 'O desejo de permanecer com a família', 'Destination': 'destino', 'Destroyed': 'Destruído', 'Details field is required!': 'Campo de detalhes é obrigatório!', 'Details': 'detalhes', 'Dialysis': 'Diálise', 'Diaphragms, horizontal bracing': 'Diafragmas, contraventamento horizontal', 'Diarrhea': 'Diarréia', 'Dignitary Visit': 'Visita de Dignitários', 'Direction': 'Direção', 'Disable': 'Desativar', 'Disabled participating in coping activities': 'Deficientes participando de atividades de enfrentamento', 'Disabled': 'desativado', 'Disabled?': 'Desativado?', 'Disaster Victim Identification': 'Identificação de Vítima de Desastre', 'Disaster Victim Registry': 'Registro de Vítima de Desastre', 'Disaster clean-up/repairs': 'Limpeza/reparos do desastre', 'Discharge (cusecs)': 'Vazão (cusecs)', 'Discharges/24hrs': 'Descargas/24horas', 'Discussion Forum on item': 'Fórum de discussão do item', 'Discussion Forum': 'Fórum de Discussão', 'Disease vectors': 'Vetores de doença', 'Dispensary': 'Dispensário', 'Displaced Populations': 'Populações deslocadas', 'Displaced': 'Deslocadas', 'Display Polygons?': 'exibir Polígonos?', 'Display Routes?': 'Exibir Rotas?', 'Display Tracks?': 'exibir Trilhas?', 'Display Waypoints?': 'Exibir Pontos de Referência?', 'Distance between defecation area and water source': 'Distância entre área de esgoto e fonte de água', 'Distance from %s:': 'Distância de %s:', 'Distance(Kms)': 'Distância(Kms)', 'Distribution groups': 'Grupos de distribuição', 'Distribution': 'Distribuição', 'District': 'Distrito', 'Do you really want to delete these records?': 'Você realmente deseja excluir esses registros?', 'Do you want to cancel this received shipment? The items will be removed from the Inventory. This action CANNOT be undone!': 'Você deseja cancelar este carregamento que foi recebido? Os itens serão removidos do inventário. Esta ação não pode ser desfeita!', 'Do you want to cancel this sent shipment? The items will be returned to the Inventory. This action CANNOT be undone!': 'Você deseja cancelar esse carregamento enviado? Os itens serão retornados para o inventário. 
Esta ação não pode ser desfeita!', 'Do you want to receive this shipment?': 'Você deseja receber esse carregamento?', 'Do you want to send these Committed items?': 'Você deseja enviar esses itens Consolidados?', 'Do you want to send this shipment?': 'Você deseja enviar este carregamento?', 'Document Details': 'Detalhes do Documento', 'Document Scan': 'Digitalizar Documento', 'Document added': 'Documento incluído', 'Document deleted': 'Documento excluído', 'Document updated': 'Documento Atualizado', 'Document': 'documento', 'Documents and Photos': 'Documentos e Fotos', 'Documents': 'Documentos', 'Does this facility provide a cholera treatment center?': 'Esta instalação proporciona um centro de tratamento da cólera?', 'Doing nothing (no structured activity)': 'Fazendo nada (sem atividade estruturada)', 'Dollars': 'dólares', 'Domain': 'domínio', 'Domestic chores': 'Afazeres domésticos', 'Donated': 'Doado', 'Donation Certificate': 'Certificado de doação', 'Donation Phone #': 'Número de Telefone de doação', 'Donor Details': 'Detalhes do Doador', 'Donor added': 'Doador incluído', 'Donor deleted': 'Doador excluído', 'Donor updated': 'Doador ATUALIZADO', 'Donor': 'Dador', 'Donors Report': 'Relatório de Doadores', 'Donors': 'Doadores', 'Door frame': 'Quadro de porta', 'Download PDF': 'Fazer download do PDF', 'Draft': 'rascunho', 'Drainage': 'Drenagem', 'Drawing up a Budget for Staff & Equipment across various Locations.': 'Elaborar um orçamento para Equipe & Equipamento de vários locais.', 'Drill Down by Group': 'Detalhar por grupo', 'Drill Down by Incident': 'Detalhar por incidente', 'Drill Down by Shelter': 'Detalhar por abrigo', 'Driving License': 'Carteira de Motorista', 'Drought': 'Seca', 'Drugs': 'Drogas', 'Dug Well': 'Poço Escavado', 'Duplicate?': 'Duplicado?', 'Duration': 'Duração', 'Dust Storm': 'Tempestade de Poeira', 'Dwelling': 'Habitação', 'Dwellings': 'Habitações', 'EMS Reason': 'Razão EMS', 'ER Status Reason': 'Razão ER Status', 'Early Recovery': 'Recuperação Inicial', 'Earthquake': 'Terremoto', 'Edit Activity': 'Editar Atividade', 'Edit Address': 'Editar Endereço', 'Edit Alternative Item': 'Editar Item Alternativo', 'Edit Application': 'Editar Aplicação', 'Edit Assessment Summary': 'Editar resumo da avaliação', 'Edit Assessment': 'Editar avaliação', 'Edit Asset Assignment': 'Editar designação do recurso', 'Edit Asset Log Entry': 'Editar entrada de log de ativo', 'Edit Asset': 'Editar recurso', 'Edit Baseline Type': 'Editar tipo de linha de base', 'Edit Baseline': 'Editar linha de base', 'Edit Brand': 'Editar marca', 'Edit Budget': 'Editar orçamento', 'Edit Bundle': 'Editar Pacote', 'Edit Camp Service': 'EDITAR Serviço de acampamento', 'Edit Camp Type': 'Editar Tipo de Campo', 'Edit Camp': 'EDITAR acampamento', 'Edit Catalog Item': 'Editar item do catálogo', 'Edit Catalog': 'Editar catálogo', 'Edit Certificate': 'Editar Certificado', 'Edit Certification': 'Editar Certificação', 'Edit Cluster Subsector': 'Editar subgrupo', 'Edit Cluster': 'Editar grupo', 'Edit Commitment Item': 'Editar Item De Compromisso', 'Edit Commitment': 'Editar compromisso', 'Edit Competency Rating': 'Editar Classificação da Competência', 'Edit Competency': 'Editar Competência', 'Edit Contact Information': 'Editar Informações de Contato', 'Edit Contact': 'Editar Contato', 'Edit Contents': 'Editar Conteúdo', 'Edit Course Certificate': 'Editar Certificado de Curso', 'Edit Course': 'Editar Curso', 'Edit Credential': 'Editar Credencial', 'Edit Dead Body Details': 'Editar Detalhes do Cadáver', 'Edit Description': 
'Editar Descrição', 'Edit Details': 'Editar detalhes', 'Edit Disaster Victims': 'Editar vítimas do desastre', 'Edit Document': 'Editar documento', 'Edit Donor': 'Editar Doador', 'Edit Email Settings': 'Editar Configurações de E-Mail', 'Edit Entry': 'Editar Entrada', 'Edit Event': 'Editar evento', 'Edit Facility': 'Editar recurso', 'Edit Feature Layer': 'Editar Camada de Recurso', 'Edit Flood Report': 'Editar Relatório de Enchente', 'Edit Gateway Settings': 'Editar Configurações de Gateway', 'Edit Group': 'Editar Grupo', 'Edit Hospital': 'Editar Hospital', 'Edit Human Resource': 'Editar Recurso Humano', 'Edit Identification Report': 'Editar Relatório de identificação', 'Edit Identity': 'Editar Identidade', 'Edit Image Details': 'Editar Detalhes da Imagem', 'Edit Image': 'Editar Imagem', 'Edit Impact Type': 'Editar Tipo De Impacto', 'Edit Impact': 'Editar Impacto', 'Edit Incident Report': 'Editar Relatório de Incidente', 'Edit Inventory Item': 'Editar Item De Inventário', 'Edit Item Category': 'Editar Categoria de Item', 'Edit Item Pack': 'Editar Pacote de Itens', 'Edit Item': 'Editar Item', 'Edit Job Role': 'Editar cargo', 'Edit Key': 'Editar Chave', 'Edit Kit': 'Editar Kit', 'Edit Layer': 'Editar Camada', 'Edit Level %d Locations?': 'Editar Locais de Nível %d?', 'Edit Level 1 Assessment': 'Editar Avaliação Nível 1', 'Edit Level 2 Assessment': 'Editar Avaliação Nível 2', 'Edit Location': 'Editar Local', 'Edit Log Entry': 'Editar Entrada de Log', 'Edit Map Configuration': 'Editar Configuração de Mapa', 'Edit Map Services': 'Editar Serviços de Mapa', 'Edit Marker': 'Editar Marcador', 'Edit Membership': 'Editar inscrição', 'Edit Message': 'Editar mensagem', 'Edit Messaging Settings': 'Editar Configurações De Mensagens', 'Edit Mission': 'Editar Missão', 'Edit Modem Settings': 'Editar Configurações Do Modem', 'Edit Need Type': 'Editar tipo de necessidade', 'Edit Need': 'Editar Necessidade', 'Edit Note': 'Editar nota', 'Edit Office': 'Editar Escritório', 'Edit Options': 'Editar Opções', 'Edit Organization': 'Editar Organização', 'Edit Parameters': 'Editar Parâmetros', 'Edit Peer Details': 'Editar Detalhes do Par', 'Edit Person Details': 'Editar detalhes pessoais', 'Edit Personal Effects Details': 'Editar detalhes de objectos pessoais', 'Edit Photo': 'Editar Foto', 'Edit Population Statistic': 'Editar Estatística da População', 'Edit Position': 'Editar Posição', 'Edit Problem': 'Editar Problema', 'Edit Project': 'Editar Projecto', 'Edit Projection': 'Editar Projeção', 'Edit Rapid Assessment': 'Editar Avaliação Rápida', 'Edit Received Item': 'Editar Item Recebido', 'Edit Received Shipment': 'Editar Embarque Recebido', 'Edit Record': 'Editar Registro', 'Edit Registration Details': 'Editar Detalhes De Registro', 'Edit Registration': 'Editar Registro', 'Edit Report': 'Editar Relatório', 'Edit Request Item': 'Editar Item Pedido', 'Edit Request': 'Editar Pedido', 'Edit Resource': 'Editar Recurso', 'Edit River': 'Editar Rio', 'Edit Role': 'Editar Função', 'Edit Room': 'Editar Sala', 'Edit Scenario': 'Editar cenário', 'Edit Sector': 'Editar Setor', 'Edit Sent Item': 'Editar Item Enviado', 'Edit Setting': 'Editar Definição', 'Edit Settings': 'Editar Configurações', 'Edit Shelter Service': 'Editar Serviço de Abrigo', 'Edit Shelter Type': 'Editar Tipo de Abrigo', 'Edit Shelter': 'Editar Abrigo', 'Edit Skill Equivalence': 'Editar Equivalência de Habilidade', 'Edit Skill Provision': 'Editar Fornecimento de Habilidade', 'Edit Skill Type': 'Editar Tipo de Competência', 'Edit Skill': 'Editar 
Competência', 'Edit Solution': 'Editar Solução', 'Edit Staff Member Details': 'Editar detalhes do membro da equipe', 'Edit Staff Type': 'Editar Tipo de Equipe', 'Edit Staff': 'Editar Pessoal', 'Edit Subscription': 'Editar assinatura', 'Edit Subsector': 'Editar Subsetor', 'Edit Survey Answer': 'Editar resposta da pesquisa', 'Edit Survey Question': 'Editar pergunta da pesquisa', 'Edit Survey Section': 'Editar Seção de Pesquisa', 'Edit Survey Series': 'Editar Série de Pesquisa', 'Edit Survey Template': 'Editar Modelo de Pesquisa', 'Edit Task': 'Editar Tarefa', 'Edit Team': 'Editar equipe', 'Edit Theme': 'Editar tema', 'Edit Themes': 'Editar Temas', 'Edit Ticket': 'Editar Bilhete', 'Edit Track': 'Editar Rastreamento', 'Edit Training': 'Editar Treinamento', 'Edit Tropo Settings': 'Editar Configurações Tropo', 'Edit User': 'Editar Usuário', 'Edit Volunteer Availability': 'Editar Disponibilidade de Voluntário', 'Edit Volunteer Details': 'Editar Detalhes de Voluntário', 'Edit Warehouse': 'Editar Armazém', 'Edit current record': 'Editar Registro Atual', 'Edit message': 'Editar mensagem', 'Edit the Application': 'Editar a Aplicação', 'Edit': 'Editar', 'Editable?': 'Editável?', 'Education materials received': 'Materiais de educação recebidos', 'Education materials, source': 'Materiais de educação, origem', 'Education': 'Educação', 'Effects Inventory': 'Inventário de efeitos', 'Eggs': 'Ovos', 'Either a shelter or a location must be specified': 'Um abrigo ou um local deve ser especificado', 'Either file upload or document URL required.': 'É necessário o upload de um arquivo ou a URL do documento.', 'Either file upload or image URL required.': 'É necessário o upload de um arquivo ou a URL da imagem.', 'Elderly person headed households (>60 yrs)': 'Famílias chefiadas por idosos (>60 anos)', 'Electrical': 'Elétrico', 'Electrical, gas, sewerage, water, hazmats': 'Elétrica, gás, esgotos, água, materiais perigosos', 'Elevated': 'Elevado', 'Elevators': 'Elevadores', 'Email Address': 'Endereço de e-mail', 'Email Settings': 'Configurações de e-mail', 'Email settings updated': 'Configurações de e-mail atualizadas', 'Email': 'E-mail', 'Embalming': 'Embalsamamento', 'Embassy': 'Embaixada', 'Emergency Capacity Building project': 'Projeto de Desenvolvimento de Capacidade para Emergências', 'Emergency Department': 'Departamento de Emergência', 'Emergency Shelter': 'Abrigo de Emergência', 'Emergency Support Facility': 'Instalação de Suporte de Emergência', 'Emergency Support Service': 'Serviço de Suporte de Emergência', 'Emergency Telecommunications': 'Telecomunicações de Emergência', 'Enable/Disable Layers': 'Ativar/Desativar Camadas', 'Enabled': 'Habilitado', 'End Date': 'Data de encerramento', 'End date should be after start date': 'A data final deve ser posterior à data de início', 'End date': 'Data de Término', 'End of Period': 'Fim de Período', 'English': 'Inglês', 'Enter Coordinates:': 'Digite as coordenadas:', 'Enter a GPS Coord': 'Digite uma Coordenada GPS', 'Enter a name for the spreadsheet you are uploading (mandatory).': 'Digite um nome para a planilha que está fazendo Upload (obrigatório).', 'Enter a new support request.': 'Digite um pedido novo de suporte.', 'Enter a unique label!': 'Digite um rótulo exclusivo!', 'Enter a valid date before': 'Digite uma data válida antes de', 'Enter a valid email': 'Insira um email válido', 'Enter a valid future date': 'Digite uma data futura válida', 'Enter some characters to bring up a list of possible matches': 'Digite alguns caracteres para trazer uma lista de correspondências possíveis', 
'Enter some characters to bring up a list of possible matches.': 'Digite alguns caracteres para trazer uma lista de correspondências possíveis.', 'Enter tags separated by commas.': 'Insira as tags separadas por vírgulas.', 'Enter the same password as above': 'Digite a mesma senha acima', 'Entered': 'Inserido', 'Entering a phone number is optional, but doing so allows you to subscribe to receive SMS messages.': 'Digitar um número de telefone é opcional, mas ao fazer isto você poderá se registrar para receber mensagens SMS.', 'Entry deleted': 'Entrada removida', 'Environment': 'Ambiente', 'Equipment': 'Equipamento', 'Error encountered while applying the theme.': 'Erro encontrado ao aplicar o tema.', 'Error in message': 'Erro na mensagem', 'Error logs for "%(app)s"': 'Registro de erros de "%(app)s"', 'Error: no such record': 'Erro: nenhum registro', 'Errors': 'Erros', 'Est. Delivery Date': 'Data de Entrega Est.', 'Estimated # of households who are affected by the emergency': '# estimado de famílias que são afetadas pela emergência', 'Estimated # of people who are affected by the emergency': '# estimado de pessoas que são afetadas pela emergência', 'Estimated Overall Building Damage': 'Dano total de construção estimado', 'Estimated total number of people in institutions': 'Número total estimado de pessoas em instituições', 'Evacuating': 'Evacuando', 'Evaluate the information in this message. (This value SHOULD NOT be used in public warning applications.)': 'Valide as informações desta mensagem. (Este valor NÃO DEVE ser utilizado em aplicações de aviso público.)', 'Event Details': 'Detalhes do evento', 'Event added': 'Evento incluído', 'Event deleted': 'Evento excluído', 'Event updated': 'Evento atualizado', 'Event': 'Evento', 'Events': 'Eventos', 'Example': 'Exemplo', 'Exceeded': 'Excedido', 'Excellent': 'Excelente', 'Exclude contents': 'Excluir conteúdo', 'Excreta disposal': 'Eliminação de dejetos', 'Execute a pre-planned activity identified in <instruction>': 'Executar uma atividade pré-planejada identificada em <instruction>', 'Exercise': 'Exercício', 'Exercise?': 'Exercício?', 'Exercises mean all screens have a watermark & all notifications have a prefix.': "Exercícios significa que todas as telas têm uma marca d'água & todas as comunicações têm um prefixo.", 'Existing Placard Type': 'Tipo de Cartaz Existente', 'Existing food stocks': 'Estoques de alimentos existentes', 'Existing location cannot be converted into a group.': 'Local Existente não pode ser convertido em um grupo.', 'Exits': 'Saídas', 'Experience': 'Experiência', 'Expiry Date': 'Data de expiração', 'Explosive Hazard': 'Perigo explosivo', 'Export Data': 'Exportar Dados', 'Export Database as CSV': 'Exportar o banco de dados como CSV', 'Export in GPX format': 'Exportar no formato GPX', 'Export in KML format': 'Exportar no formato KML', 'Export in OSM format': 'Exportar no formato OSM', 'Export in PDF format': 'Exportar no formato PDF', 'Export in RSS format': 'Exportar no formato RSS', 'Export in XLS format': 'Exportar no formato XLS', 'Export': 'Exportar', 'Exterior Only': 'Apenas Exterior', 'Exterior and Interior': 'Exterior e Interior', 'Eye Color': 'Cor dos Olhos', 'Facial hair, color': 'Pelos faciais, cor', 'Facial hair, type': 'Pelos faciais, tipo', 'Facial hear, length': 'Pelos faciais, comprimento', 'Facilities': 'Instalações', 'Facility Details': 'Detalhes da Instalação', 'Facility Operations': 'Operações da Instalação', 'Facility Status': 'Status da Instalação', 'Facility Type': 'Tipo de Instalação', 'Facility added': 
'Instalação incluída', 'Facility or Location': 'Instalação ou Local', 'Facility removed': 'Instalação removida', 'Facility updated': 'Instalação atualizada', 'Facility': 'Instalação', 'Fail': 'Falha', 'Failed!': 'Falha!', 'Fair': 'Razoável', 'Falling Object Hazard': 'Risco de Queda de Objetos', 'Families/HH': 'Famílias/HH', 'Family tarpaulins received': 'Lonas de família recebidas', 'Family tarpaulins, source': 'Lonas de família, origem', 'Family': 'Família', 'Family/friends': 'Família/amigos', 'Farmland/fishing material assistance, Rank': 'Assistência com material agrícola/de pesca, Classificação', 'Fatalities': 'Fatalidades', 'Fax': 'Fax', 'Feature Layer Details': 'Detalhes da Camada de Recurso', 'Feature Layer added': 'Camada de Recurso incluída', 'Feature Layer deleted': 'Camada de Recurso excluída', 'Feature Layer updated': 'Camada de Recurso atualizada', 'Feature Layers': 'Camadas de Recurso', 'Feature Namespace': 'Espaço de Nomes do Recurso', 'Feature Request': 'Pedido de Componente', 'Feature Type': 'Tipo de Componente', 'Features Include': 'Componentes Incluídos', 'Female headed households': 'Famílias chefiadas por mulheres', 'Female': 'Sexo Feminino', 'Few': 'Poucos', 'Field Hospital': 'Hospital de Campanha', 'Field': 'Campo', 'File': 'Arquivo', 'Fill in Latitude': 'Preencher a Latitude', 'Fill in Longitude': 'Preencher a Longitude', 'Filter Field': 'Campo de Filtro', 'Filter Value': 'Valor de Filtro', 'Filter': 'Filtro', 'Find All Matches': 'Localizar todas as correspondências', 'Find Dead Body Report': 'Localizar Relatório de Cadáver', 'Find Hospital': 'Localizar Hospital', 'Find Person Record': 'Localizar registro de pessoa', 'Find Volunteers': 'Localizar Voluntários', 'Find a Person Record': 'Localizar um Registro de Pessoa', 'Find': 'Localizar', 'Finder': 'Localizador', 'Fingerprint': 'Impressão digital', 'Fingerprinting': 'Coleta de impressões digitais', 'Fingerprints': 'Impressões Digitais', 'Finish': 'Terminar', 'Finished Jobs': 'Tarefas Terminadas', 'Fire suppression and rescue': 'Supressão e salvamento de incêndio', 'Fire': 'Fogo', 'First Name': 'Primeiro Nome', 'First name': 'Primeiro Nome', 'Fishing': 'Pesca', 'Flash Flood': 'Enchente Relâmpago', 'Flash Freeze': 'Congelamento Rápido', 'Flexible Impact Assessments': 'Avaliações de Impacto Flexíveis', 'Flood Alerts show water levels in various parts of the country': 'Os alertas de inundação mostram o nível da água em várias partes do país', 'Flood Alerts': 'Alertas de Enchente', 'Flood Report Details': 'Detalhes do Relatório de Inundação', 'Flood Report added': 'Relatório de Inundação incluído', 'Flood Report deleted': 'Relatório de Inundação removido', 'Flood Report updated': 'Relatório de Inundação atualizado', 'Flood Report': 'Relatório de Inundação', 'Flood Reports': 'Relatórios de Inundação', 'Flood': 'Enchente', 'Flow Status': 'Status do Fluxo', 'Focal Point': 'Ponto Focal', 'Fog': 'Nevoeiro', 'Food Supply': 'Alimentação', 'Food assistance': 'Ajuda alimentar', 'Footer file %s missing!': 'Arquivo de rodapé %s ausente!', 'Footer': 'Rodapé', 'For a country this would be the ISO2 code, for a Town, it would be the Airport Locode.': 'Para um país este seria o código ISO2, para uma cidade, este seria o código do aeroporto (UN/Locode).', 'For each sync partner, there is a default sync job that runs after a specified interval of time. You can also set up more sync jobs which could be customized on your needs. 
Click the link on the right to get started.': 'Para cada parceiro de sincronização, há uma tarefa de sincronização padrão que é executada após um intervalo de tempo especificado. Você também pode configurar mais tarefas de sincronização que podem ser customizadas de acordo com as suas necessidades. Clique no link à direita para começar.', 'For enhanced security, you are recommended to enter a username and password, and notify administrators of other machines in your organization to add this username and password against your UUID in Synchronization -> Sync Partners': 'Para segurança reforçada, é recomendável digitar um nome de usuário e senha, e notificar os administradores de outras máquinas em sua organização para incluir esse usuário e senha no UUID em Sincronização -> Parceiros De Sincronização', 'For live help from the Sahana community on using this application, go to': 'Para ajuda ao vivo da comunidade do Sahana sobre como utilizar esse aplicativo, vá para', 'For messages that support alert network internal functions': 'Para mensagens que suportam funções internas da rede de alerta', 'For more details on the Sahana Eden system, see the': 'Para obter mais detalhes sobre o sistema Sahana Eden, consulte o', 'For more information, see': 'Para obter mais informações, consulte', 'For other types, the next screen will allow you to enter the relevant details...': 'Para outros tipos, a próxima tela permitirá que você digite os detalhes relevantes...', 'For': 'Para', 'Forest Fire': 'Incêndio florestal', 'Formal camp': 'Acampamento formal', 'Format': 'Formato', 'Forms': 'Formulários', 'Found': 'Localizado', 'Foundations': 'Fundações', 'Freezing Drizzle': 'Garoa gelada', 'Freezing Rain': 'Chuva Gelada', 'Freezing Spray': 'Spray Gelado', 'French': 'Francês', 'Friday': 'sexta-feira', 'From Inventory': 'A partir do Inventário', 'From Location': 'Do Local', 'From Organization': 'Da Organização', 'From Person': 'Da Pessoa', 'From': 'De', 'Frost': 'Geada', 'Fulfil. Status': 'Status de Atendimento', 'Fulfillment Status': 'Status de Atendimento', 'Full beard': 'Barba completa', 'Full': 'Cheio', 'Fullscreen Map': 'Mapa em tela cheia', 'Functions available': 'Funções disponíveis', 'Funding Organization': 'Organização Financiadora', 'Further Action Recommended': 'Ação Adicional Recomendada', 'GIS Reports of Shelter': 'Relatórios GIS de abrigos', 'GIS integration to view location details of the Shelter': 'Integração GIS para visualizar detalhes do local do Abrigo', 'GPS Marker': 'Marcador De GPS', 'GPS Track File': 'Arquivo de Trilha GPS', 'GPS Track': 'Trilha GPS', 'GPX Track': 'Trilha GPX', 'GRN Status': 'Status GRN', 'GRN': 'NRM', 'Gale Wind': 'Vendaval', 'Gap Analysis Map': 'Mapa de Análise de Falhas', 'Gap Analysis Report': 'Relatório de Análise de Falhas', 'Gap Analysis': 'Análise de Falhas', 'Gap Map': 'Mapa de Falhas', 'Gap Report': 'Relatório de Falhas', 'Gateway Settings': 'Configurações de Gateway', 'Gateway settings updated': 'Configurações de Gateway atualizadas', 'Gateway': 'Gateway', 'Gender': 'Sexo', 'General Comment': 'Comentário Geral', 'General Medical/Surgical': 'Médico/Cirúrgico Geral', 'General emergency and public safety': 'Emergência geral e segurança pública', 'General information on demographics': 'Informações gerais sobre demografia', 'General': 'Geral', 'Generator': 'Gerador', 'Geocode': 'Geocodificar', 'Geocoder Selection': 'Seleção de Geocodificador', 'Geometry Name': 'Nome da geometria', 'Geophysical (inc. landslide)': 'Geofísica (inc. 
deslizamento)', 'Geotechnical Hazards': 'Riscos Geotécnicos', 'Geotechnical': 'Geotécnica', 'Geraldo module not available within the running Python - this needs installing for PDF output!': 'Módulo Geraldo não disponível no Python em execução - é necessário instalá-lo para saída em PDF!', 'Geraldo not installed': 'Geraldo não instalado', 'Get incoming recovery requests as RSS feed': 'Obter pedidos recebidos de recuperação como feed RSS', 'Give a brief description of the image, e.g. what can be seen where on the picture (optional).': 'Fornecer uma descrição breve da imagem, por exemplo, o que pode ser visto no local da imagem (opcional).', 'Give information about where and when you have seen them': 'Fornecer informações sobre onde e quando você os viu', 'Global Messaging Settings': 'Configurações Globais de Mensagens', 'Go to Request': 'Ir para Pedido', 'Go': 'Ir', 'Goatee': 'Barbicha', 'Good Condition': 'Boa Condição', 'Good': 'Bom', 'Goods Received Note': 'Nota de Recebimento de Mercadorias', 'Government UID': 'UID do Governo', 'Government building': 'Prédio público', 'Government': 'Governamental', 'Grade': 'Grau', 'Greek': 'Grego', 'Green': 'Verde', 'Ground movement, fissures': 'Movimento do solo, fissuras', 'Ground movement, settlement, slips': 'Movimento do solo, assentamentos, escorregamentos', 'Group Description': 'Descrição do Grupo', 'Group Details': 'Detalhes do grupo', 'Group Member added': 'Membro do grupo incluído', 'Group Members': 'Membros do grupo', 'Group Memberships': 'Associados do Grupo', 'Group Name': 'Nome do grupo', 'Group Title': 'Título do grupo', 'Group Type': 'Tipo de grupo', 'Group added': 'Grupo adicionado', 'Group deleted': 'Grupo Excluído', 'Group description': 'Descrição do Grupo', 'Group updated': 'Grupo atualizado', 'Group': 'Grupo', 'Groups removed': 'Grupos removidos', 'Groups': 'Grupos', 'Guest': 'Convidado', 'HR Data': 'Dados de RH', 'HR Manager': 'Responsável de RH', 'Hail': 'Granizo', 'Hair Color': 'Cor do Cabelo', 'Hair Length': 'Comprimento do cabelo', 'Hair Style': 'Estilo do Cabelo', 'Has additional rights to modify records relating to this Organization or Site.': 'Tem direitos adicionais para modificar os registros relativos a esta organização ou site.', 'Has data from this Reference Document been entered into Sahana?': 'Os dados deste documento de referência foram digitados no Sahana?', 'Has only read-only access to records relating to this Organization or Site.': 'Tem apenas acesso de leitura para os registros relativos a esta organização ou site.', 'Has the Certificate for receipt of the shipment been given to the sender?': 'O certificado de recepção do carregamento foi dado para o remetente?', 'Has the GRN (Goods Received Note) been completed?': 'O GRN (nota de mercadorias recebidas) foi concluído?', 'Hazard Pay': 'Adicional de Periculosidade', 'Hazardous Material': 'Material perigoso', 'Hazardous Road Conditions': 'Estradas em Condições de Risco', 'Header Background': 'Plano de Fundo do Cabeçalho', 'Header background file %s missing!': 'Arquivo de plano de fundo do cabeçalho %s ausente!', 'Headquarters': 'Matriz', 'Health care assistance, Rank': 'Assistência de saúde, Classificação', 'Health center with beds': 'Centro de saúde com camas', 'Health center without beds': 'Centro de saúde sem camas', 'Health center': 'Centro de Saúde', 'Health services status': 'Situação dos serviços de saúde', 'Health': 'Saúde', 'Healthcare Worker': 'Profissional de Saúde', 'Heat Wave': 'Onda de calor', 'Heat and Humidity': 'Calor e Umidade', 'Height (cm)': 'Altura (cm)', 'Height 
(m)': 'Altura (m)', 'Height': 'Altura', 'Help': 'Ajuda', 'Helps to monitor status of hospitals': 'Ajuda para monitorar status de hospitais', 'Helps to report and search for Missing Persons': 'Ajuda a reportar e procurar pessoas desaparecidas.', 'Helps to report and search for missing persons': 'Ajuda a reportar e procurar pessoas desaparecidas.', 'Here are the solution items related to the problem.': 'Aqui estão as soluções relacionadas ao problema.', 'Heritage Listed': 'Patrimônio Listado', 'Hierarchy Level %d Name': 'Nome do Nível %d da Hierarquia', 'Hierarchy Level 0 Name (e.g. Country)': 'Nome do Nível 0 da Hierarquia (por exemplo, País)', 'Hierarchy Level 0 Name (i.e. Country)': 'Nome do Nível 0 da Hierarquia (ou seja, País)', 'Hierarchy Level 1 Name (e.g. Province)': 'Nome do Nível 1 da Hierarquia (por exemplo, Província)', 'Hierarchy Level 1 Name (e.g. State or Province)': 'Nome do Nível 1 da Hierarquia (por exemplo, Estado ou Província)', 'Hierarchy Level 2 Name (e.g. District or County)': 'Nome do Nível 2 da Hierarquia (por exemplo, Região ou Município)', 'Hierarchy Level 3 Name (e.g. City / Town / Village)': 'Nome do Nível 3 da Hierarquia (por exemplo, Cidade / Município / Vila)', 'Hierarchy Level 4 Name (e.g. Neighbourhood)': 'Nome do Nível 4 da Hierarquia (por exemplo, Bairro)', 'Hierarchy Level 5 Name': 'Nome do Nível 5 da Hierarquia', 'High Water': "Nível d'água alto", 'High': 'Alta', 'History': 'Histórico', 'Hit the back button on your browser to try again.': 'Clique no ícone de voltar em seu navegador para tentar novamente.', 'Holiday Address': 'Endereço durante Feriado', 'Home Address': 'Endereço Residencial', 'Home Country': 'País natal', 'Home Crime': 'Crime Doméstico', 'Home': 'Residência', 'Hospital Details': 'Detalhes do Hospital', 'Hospital Status Report': 'Relatório de Status do Hospital', 'Hospital information added': 'Informações do hospital incluídas', 'Hospital information deleted': 'Informações do hospital excluídas', 'Hospital information updated': 'Informações do hospital atualizadas', 'Hospital status assessment.': 'Avaliação de status do Hospital.', 'Hospitals': 'Hospitais', 'Hot Spot': 'Ponto Crítico', 'Hour': 'Hora', 'Hours': 'Horas', 'Household kits received': 'Kits domésticos recebidos', 'Household kits, source': 'Kits domésticos, origem', 'How does it work?': 'Como funciona?', 'How is this person affected by the disaster? (Select all that apply)': 'Como esta pessoa é afetada pelo desastre? 
(selecione todos que se aplicam)', 'How long will the food last?': 'Quanto tempo irá durar a comida?', 'How many Boys (0-17 yrs) are Dead due to the crisis': 'Quantos rapazes (0-17 anos) estão Mortos devido à crise', 'How many Boys (0-17 yrs) are Injured due to the crisis': 'Quantos rapazes (0-17 anos) estão Feridos devido à crise', 'How many Boys (0-17 yrs) are Missing due to the crisis': 'Quantos rapazes (0-17 anos) estão Desaparecidos devido à crise', 'How many Girls (0-17 yrs) are Dead due to the crisis': 'Quantas garotas (0-17 anos) morreram devido à crise', 'How many Girls (0-17 yrs) are Injured due to the crisis': 'Quantas garotas (0-17 anos) estão feridas devido à crise', 'How many Girls (0-17 yrs) are Missing due to the crisis': 'Quantas garotas (0-17 anos) estão perdidas devido à crise', 'How many Men (18 yrs+) are Dead due to the crisis': 'Quantos homens (18 anos+) estão mortos devido à crise', 'How many Men (18 yrs+) are Injured due to the crisis': 'Quantos homens (18 anos +) são feridos devido à crise', 'How many Men (18 yrs+) are Missing due to the crisis': 'Quantos homens (18 anos +) estão ausentes devido à crise', 'How many Women (18 yrs+) are Dead due to the crisis': 'Quantas mulheres (+18 anos) estão mortas devido à crise', 'How many Women (18 yrs+) are Injured due to the crisis': 'Quantas mulheres (+18 anos) estão feridas devido à crise', 'How many Women (18 yrs+) are Missing due to the crisis': 'Quantas mulheres acima de 18 anos estão ausentes devido à crise', 'How many days will the supplies last?': 'Quantos dias irão durar os abastecimentos?', 'How many new cases have been admitted to this facility in the past 24h?': 'Quantos novos casos tenham sido admitidos a esta facilidade nas últimas 24 horas?', 'How many of the patients with the disease died in the past 24h at this facility?': 'Como muitos dos pacientes com a doença morreram nas últimas 24 horas nesta unidade?', 'How many patients with the disease are currently hospitalized at this facility?': 'Quantos pacientes com a doença estão atualmente internados nesta instalação?', 'How much detail is seen. A high Zoom level means lot of detail, but not a wide area. A low Zoom level means seeing a wide area, but not a high level of detail.': 'Quanto detalhe é visto. Um nível alto de Zoom mostra muitos detalhes, mas não uma grande área. 
Um nível de Zoom baixo significa ver uma grande área, mas não com um alto nível de detalhe.', 'Human Resource Details': 'Detalhes de Recursos Humanos', 'Human Resource Management': 'Gerenciamento de recursos humanos', 'Human Resource added': 'Recurso humano adicionado', 'Human Resource removed': 'Recursos Humanos removido', 'Human Resource updated': 'Recursos Humanos atualizado', 'Human Resource': 'Recursos humanos', 'Human Resources Management': 'Gerenciamento de Recursos Humanos', 'Human Resources': 'Recursos Humanos', 'Humanitarian NGO': 'ONG humanitária', 'Hurricane Force Wind': 'Furacão Força Vento', 'Hurricane': 'Furacão', 'Hygiene NFIs': 'Higiene NFIs', 'Hygiene kits received': 'Kits de higiene recebido', 'Hygiene kits, source': 'Kits de higiene, origem', 'Hygiene practice': 'Prática de higiene', 'Hygiene problems': 'PROBLEMAS DE HIGIENE', 'Hygiene': 'Higiene', 'I am available in the following area(s)': 'Estou disponível na(s) seguinte(s) área(s)', 'ID Tag Number': 'Número da Etiqueta de Identificação', 'ID Tag': 'Etiqueta de Identificação', 'ID type': 'Tipo de ID', 'Ice Pressure': 'Pressão de gelo', 'Iceberg': 'Icebergue', 'Identification Report': 'Identificação Relatório', 'Identification Reports': 'Relatórios de Identificação', 'Identification Status': 'Status da Identificação', 'Identified as': 'Identificado como', 'Identified by': 'Identificado por', 'Identity Details': 'Detalhes da identidade', 'Identity added': 'Identidade incluída', 'Identity deleted': 'Identidade excluída', 'Identity updated': 'Identidade atualizada', 'Identity': 'Identidade', 'If Staff have login accounts then they are given access to edit the details of the': 'Se o pessoal tiver contas de login, então lhes é dado acesso para editar os detalhes do', 'If a ticket was issued then please provide the Ticket ID.': 'Se um bilhete foi emitido então por favor forneça o ID do bilhete.', 'If a user verifies that they own an Email Address with this domain, the Approver field is used to determine whether & by whom further approval is required.': 'Se um usuário verifica que eles possuem um endereço de email com este domínio, o campo Aprovador é utilizado para determinar se e por quem aprovação adicional é necessária.', 'If it is a URL leading to HTML, then this will downloaded.': 'Se for uma URL levando a HTML, então este será baixado.', 'If neither are defined, then the Default Marker is used.': 'Se nem são definidos, então o Marcador Padrão é utilizado.', 'If no marker defined then the system default marker is used': 'Se nenhum marcador definido, o marcador padrão do sistema é utilizada', 'If no, specify why': 'Se não, especifique por que', 'If none are selected, then all are searched.': 'Se nenhuma for selecionada, então todos são procurados.', 'If the location is a geographic area, then state at what level here.': 'Se o local é uma área geográfica, então defina em que nível aqui.', 'If the request is for type "Other", you should enter a summary of the request here.': 'Se o pedido for para o tipo \\ " Outro", você deve digitar um resumo do pedido aqui.', 'If the request type is "Other", please enter request details here.': 'Se o tipo de pedido é "other", por favor, digite aqui detalhes do pedido.', 'If this field is populated then a user with the Domain specified will automatically be assigned as a Staff of this Organization': 'Se esse campo for preenchido, o usuário de um específico Domain será automaticamente registrado como funcionário desta organização.', 'If this is set to True then mails will be deleted from 
the server after downloading.': 'Se isso for ajustado para “True”, as correspondências serão deletadas do servidor depois que o downloading for feito.', 'If this record should be restricted then select which role is required to access the record here.': 'Se esse registro deve ser restrito, selecione qual regra é necessária para acessar o record aqui.', 'If this record should be restricted then select which role(s) are permitted to access the record here.': 'Se esse registro deve ser restrito, selectione qual (is) regra (s) serão permitidas para assessá-lo aqui.', 'If yes, specify what and by whom': 'Se SIM, Especifique o quê e por quem', 'If yes, which and how': 'Se sim, quais e como', 'If you do not enter a Reference Document, your email will be displayed to allow this data to be verified.': 'Se você não inserir um documento de referência, seu e-mail será exibido para permitir que esses dados sejam verificados.', 'If you know what the Geonames ID of this location is then you can enter it here.': 'Se voce conhecer o Geonames ID desta localização então voce poderá inserí-lo aqui.', 'If you know what the OSM ID of this location is then you can enter it here.': 'Se voce conhecer o OSM ID desta localização, então voce pode inserí-lo aqui.', 'If you need to add a new document then you can click here to attach one.': 'Se houver necessidade de incluir um novo documento então voce poderá clicar aqui para anexá-lo.', 'If you want several values, then separate with': 'Se voce deseja varios valores, separe com', 'If you would like to help, then please': 'Se você gostaria de ajudar, então por favor', 'Illegal Immigrant': 'Imigrante Ilegal', 'Image Details': 'Detalhes da Imagem', 'Image Tags': 'Imagem Tags', 'Image Type': 'Tipo de Imagem', 'Image Upload': 'Fazer atualizacao Da imagem', 'Image added': 'Imagem Adicionada', 'Image deleted': 'Imagem excluída', 'Image updated': 'Imagem atualizada', 'Image': 'Imagem', 'Imagery': 'Imagens', 'Images': 'Imagens', 'Impact Assessments': 'Avaliações de impacto', 'Impact Details': 'Detalhes de impacto', 'Impact Type Details': 'Detalhes dos tipos de impacto', 'Impact Type added': 'Tipo de impacto incluído', 'Impact Type deleted': 'Tipo de impacto excluído', 'Impact Type updated': 'Atualização dos tipos de impacto', 'Impact Type': 'Tipo de impacto', 'Impact Types': 'Tipos de impactos', 'Impact added': 'Impacto incluído', 'Impact deleted': 'Impacto excluído', 'Impact updated': 'Atualização de impacto', 'Impacts': 'Impactos', 'Import & Export Data': 'Importar & Exportar Dados', 'Import Data': 'Importar Dados', 'Import Jobs': 'Importar Tarefas', 'Import and Export': 'Importação e Exportação', 'Import from Ushahidi Instance': 'Importação da Instância Ushahidi', 'Import if Master': 'Importar se Mestre', 'Import multiple tables as CSV': 'Importar tabelas multiplas como CSV', 'Import': 'Importação', 'Import/Export': 'Importar/Exportar', 'Important': 'Importante', 'Importantly where there are no aid services being provided': 'Importante onde não há serviços de apoio a ser prestado', 'Importing data from spreadsheets': 'Importar dados de planilhas', 'Improper decontamination': 'Descontaminação Imprópria', 'Improper handling of dead bodies': 'Manipulação inadequada de cadáveres', 'In Catalogs': 'Em Catálogos', 'In Inventories': 'Em Inventários', 'In Process': 'Em Processo', 'In Progress': 'Em Progresso', 'In Window layout the map maximises to fill the window, so no need to set a large value here.': 'Maximize o ajuste da janela para preenche-la toda, desta forma não será 
necessário configurar para uso de fonte grande.', 'Inbound Mail Settings': 'Definições de correio de entrada', 'Incident Categories': 'Categorias Incidente', 'Incident Report Details': 'Detalhes do relatório de incidentes', 'Incident Report added': 'Relatório de Incidente incluído', 'Incident Report deleted': 'Relatório de Incidente excluído', 'Incident Report updated': 'Relatório de incidente atualizado', 'Incident Report': 'Relatório de Incidente', 'Incident Reporting System': 'Sistema de relatórios de incidentes', 'Incident Reporting': 'Relatório de incidentes', 'Incident Reports': 'Relatório de incidentes', 'Incident': 'Incidente', 'Incidents': 'incidentes', 'Incoming Shipment canceled': 'Chegada da encomenda cancelada', 'Incoming Shipment updated': 'Chegada de encomenda actualizada.', 'Incoming': 'Entrada', 'Incomplete': 'Incompleto', 'Individuals': 'Individuais', 'Industrial Crime': 'Crime Industrial', 'Industry Fire': 'Indústria Fogo', 'Infant (0-1)': 'Criança (0-1)', 'Infectious Disease (Hazardous Material)': 'Doenças infecciosas (Material perigoso)', 'Infectious Disease': 'Doença INFECCIOSA', 'Infectious Diseases': 'Doenças infecciosas', 'Infestation': 'Infestação', 'Informal Leader': 'Líder Informal', 'Informal camp': 'Acampamento Informal', 'Information gaps': 'problemas de informação', 'Infusion catheters available': 'Cateteres de infusão disponível', 'Infusion catheters need per 24h': 'Cateteres infusão necessário por 24 H', 'Infusion catheters needed per 24h': 'Cateteres infusão necessário por H', 'Infusions available': 'Infusões disponíveis', 'Infusions needed per 24h': 'Infusões necessário por 24H', 'Inspected': 'Inspecionado', 'Inspection Date': 'Data de Inspeção', 'Inspection date and time': 'Data e hora de inspeção', 'Inspection time': 'Hora da inspeção', 'Inspector ID': 'ID do Inspetor', 'Instant Porridge': 'Mingau Instantâneo', 'Institution': 'Instituição', 'Insufficient vars: Need module, resource, jresource, instance': 'Variaveis insuficientes: necessario modulo, recurso, jrecurso, instância', 'Insufficient': 'insuficiente', 'Intake Items': 'Itens de admissão', 'Intergovernmental Organization': 'Organização Intergovernamental', 'Interior walls, partitions': 'Do Interior das paredes, partições', 'Internal State': 'Estado Interno', 'International NGO': 'ONG internacional', 'International Organization': 'Organização Internacional', 'Interview taking place at': 'Entrevista em', 'Invalid Query': 'Consulta inválida', 'Invalid request!': 'Pedido inválido!', 'Invalid ticket': 'Bilhete Inválido', 'Invalid': 'Inválido', 'Inventories': 'Inventários.', 'Inventory Item Details': 'Detalhes do Item de inventário', 'Inventory Item added': 'Item incluído no inventário', 'Inventory Item deleted': 'Item do inventário excluído', 'Inventory Item updated': 'Item de Inventário atualizado', 'Inventory Item': 'Item do inventário', 'Inventory Items Available for Request Item': 'Itens de inventário disponíveis para Pedir um Item', 'Inventory Items include both consumable supplies & those which will get turned into Assets at their destination.': 'Itens de invenrário incluem ambos suprimentos consumíveis & aqueles que se transformarão em Ativos no seu destino.', 'Inventory Items': 'Itens do Inventário', 'Inventory Management': 'Gerenciamento de Inventário', 'Inventory functionality is available for:': 'Inventário de funcionalidades esta disponível para:', 'Inventory of Effects': 'Inventário de Efeitos', 'Inventory': 'Inventário', 'Is editing level L%d locations allowed?': 'É permitido editar o 
nível dos locais L%d?', 'Is it safe to collect water?': 'É seguro coletar água?', 'Is this a strict hierarchy?': 'Esta é uma hierarquia rigorosa?', 'Issuing Authority': 'Autoridade emissora', 'It captures not only the places where they are active, but also captures information on the range of projects they are providing in each area.': 'Ele captura não apenas os locais onde elas estão ativas, mas também captura informações sobre o conjunto de projetos que está fornecendo em cada região.', 'Item Added to Shipment': 'Item Incluído para Embarque', 'Item Catalog Details': 'Detalhes do item do catálogo', 'Item Categories': 'Categorias do Item', 'Item Category Details': 'Detalhes da categoria de item', 'Item Category added': 'Categoria de item incluída', 'Item Category deleted': 'Categoria de item excluída', 'Item Category updated': 'Atualização da categoria de item', 'Item Category': 'Categoria do Item', 'Item Details': 'Detalhes do item', 'Item Pack Details': 'Detalhes do pacote de itens', 'Item Pack added': 'Pacote de itens incluído', 'Item Pack deleted': 'Pacote de itens excluído', 'Item Pack updated': 'Pacote de itens atualizado', 'Item Packs': 'Pacotes de Itens', 'Item added to Inventory': 'Item adicionado ao Inventário', 'Item added to shipment': 'Item incluído para embarque', 'Item added': 'Item incluído', 'Item already in Bundle!': 'Item já no pacote configurável!', 'Item already in Kit!': 'Item já no Kit!', 'Item already in budget!': 'Item já no Orçamento!', 'Item deleted': 'Item Excluído', 'Item removed from Inventory': 'Item removido do Inventário', 'Item updated': 'Item atualizado', 'Items in Category can be Assets': 'Itens na categoria podem ser ativos', 'Items': 'Itens', 'Japanese': 'Japonês', 'Jerry can': 'Galão (jerrycan)', 'Jew': 'Judeu', 'Job Market': 'Mercado de trabalho', 'Job Role Catalog': 'Catálogo de Funções de Trabalho', 'Job Role Details': 'Detalhes da Função', 'Job Role added': 'Função de trabalho incluída', 'Job Role deleted': 'Função de trabalho excluída', 'Job Role updated': 'Função atualizada', 'Job Role': 'Função de trabalho', 'Job Roles': 'Funções', 'Job Title': 'Título do Cargo', 'Jobs': 'Tarefas', 'Journal Entry Details': 'Detalhes da Entrada de Diário', 'Journal entry added': 'Entrada de diário incluída', 'Journal entry deleted': 'Entrada de diário removida', 'Journal entry updated': 'Entrada de diário atualizada', 'Journal': 'Diário', 'Key Details': 'Detalhes da Chave', 'Key added': 'Chave adicionada', 'Key deleted': 'Chave removida', 'Key updated': 'Chave atualizada', 'Key': 'Chave', 'Keys': 'Chaves', 'Kit Contents': 'Conteúdo do Kit', 'Kit Details': 'Detalhes do Kit', 'Kit Updated': 'Kit Atualizado', 'Kit added': 'Kit adicionado', 'Kit deleted': 'Kit excluído', 'Kit updated': 'Kit atualizado', 'Kit': 'Kit', 'Known Identities': 'Identidades conhecidas', 'Known incidents of violence against women/girls': 'Incidentes de violência conhecidos contra mulheres/garotas', 'Known incidents of violence since disaster': 'Incidentes de violência conhecidos desde o desastre', 'LICENSE': 'LICENÇA', 'Lack of material': 'Falta de material', 'Lack of school uniform': 'Falta de uniforme escolar', 'Lack of supplies at school': 'Falta de suprimentos na escola', 'Lack of transport to school': 'Falta de transporte escolar', 'Lactating women': 'Mulheres lactantes', 'Landslide': 'Deslizamento', 'Language': 'Idioma', 'Last Name': 'Sobrenome', 'Last known location': 'Último local conhecido', 'Last synchronization time': 'Horário da última sincronização', 'Last updated by': 'Última 
atualização por', 'Last updated on': 'Última Atualização em', 'Last updated': 'Última atualização', 'Latitude is North-South (Up-Down).': 'Latitude é sentido norte-sul (emcima-embaixo).', 'Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere.': 'Latitude é zero na linha do Equador, positiva no hemisfério norte e negativa no hemisfério sul.', 'Latitude of Map Center': 'Latitude DO MAPA Centro', 'Latitude of far northern end of the region of interest.': 'Latitude do extremo Norte longe do Região de interesse.', 'Latitude of far southern end of the region of interest.': 'Latitude da extremidade sul longe do Região de interesse.', 'Latitude should be between': 'Latitude deve estar entre', 'Latrines': 'Privadas', 'Law enforcement, military, homeland and local/private security': 'Execução da lei militar, interna e segurança local/privada', 'Layer Details': 'Detalhes de Camada', 'Layer added': 'Camada incluída', 'Layer deleted': 'Camada excluída', 'Layer updated': 'Camada atualizada', 'Layer': 'Camada', 'Layers updated': 'Camadas atualizadas', 'Layers': 'Camadas', 'Layout': 'Modelo', 'Leader': 'guia', 'Legend Format': 'Formato da Legenda', 'Length (m)': 'Comprimento (m)', 'Level 1 Assessment Details': 'Detalhes da Avaliação Nível 1', 'Level 1 Assessment added': 'Avaliação Nível 1 incluído', 'Level 1 Assessment deleted': 'Avaliação Nível 1 excluído', 'Level 1 Assessment updated': 'Avaliação Nível 1 atualizada', 'Level 1 Assessments': 'Avaliações Nível 1', 'Level 1': 'Nível 1', 'Level 2 Assessment Details': 'Nível 2 de avaliação Detalhado', 'Level 2 Assessment added': 'Nível 2 avaliação incluído', 'Level 2 Assessment deleted': 'Nível 2 de avaliação excluído', 'Level 2 Assessment updated': 'Nível 2 de avaliação atualizada', 'Level 2 Assessments': 'Nível 2 de Avaliações', 'Level 2 or detailed engineering evaluation recommended': 'Nível 2 ou engenharia detalhada de avaliação recomendado', 'Level 2': 'nível 2', 'Level': 'Nível', 'Library support not available for OpenID': 'Apoio de biblioteca não está disponível para OpenID', 'LineString': 'cadeia-de-linhas', 'List / Add Baseline Types': 'Lista / Incluir Linha de Tipos', 'List / Add Impact Types': 'Lista / Incluir Tipos de Impacto', 'List / Add Services': 'Lista / Incluir Serviços', 'List / Add Types': 'Lista / Incluir Tipos', 'List Activities': 'listar atividades', 'List All Assets': 'Lista todos os ativos', 'List All Catalog Items': 'Lista todos os Itens Do Catálogo', 'List All Commitments': 'Lista todos os compromissos', 'List All Entries': 'Listar todas as entradas', 'List All Item Categories': 'Lista todos os itens Categorias', 'List All Memberships': 'Listar Todas As Associações', 'List All Received Shipments': 'Lista todas as transferências Recebidas', 'List All Records': 'Lista todos os registros', 'List All Reports': 'Listar todos os Relatórios', 'List All Requested Items': 'Lista Todos Os itens solicitados', 'List All Requests': 'Lista Todos Os Pedidos', 'List All Sent Shipments': 'Listar todos os embarques enviados', 'List All': 'Mostrar Tudo', 'List Alternative Items': 'Listar Itens Alternativos', 'List Assessment Summaries': 'Listar Resumos das Avaliações', 'List Assessments': 'Listar as Avaliações', 'List Asset Assignments': 'Listar Atribuições de Ativos', 'List Assets': 'Listar Ativos', 'List Availability': 'Listar Disponibilidade', 'List Baseline Types': 'Lista de Tipos De Linha', 'List Baselines': 'Lista de Linhas', 'List Brands': 'Lista de Marcas', 'List Budgets': 'Listar 
Orçamentos', 'List Bundles': 'Listar Pacotes', 'List Camp Services': 'Listar Serviços de Acampamento', 'List Camp Types': 'Listar Tipos de Acampamentos', 'List Camps': 'Listar Acampamentos', 'List Catalog Items': 'Lista de Itens Do Catálogo', 'List Catalogs': 'Listar catálogos', 'List Certificates': 'Listar certificados', 'List Certifications': 'Listar certificações', 'List Checklists': 'Lista Listas de Verificação.', 'List Cluster Subsectors': 'Lista Subsetores de Cluster', 'List Clusters': 'Lista Clusters', 'List Commitment Items': 'Lista Itens de Compromisso', 'List Commitments': 'Lista Compromissos', 'List Competencies': 'Listar competencias', 'List Competency Ratings': 'Listar classificações de competencias', 'List Conflicts': 'Lista Conflitos', 'List Contact Information': 'Listar informações do contato', 'List Contacts': 'Listar contatos', 'List Course Certificates': 'Listar certificados de cursos', 'List Courses': 'Listar Cursos', 'List Credentials': 'Listar credenciais', 'List Current': 'Lista Atual', 'List Documents': 'Listar documentos', 'List Donors': 'Listar doadores', 'List Events': 'Lista de Eventos', 'List Facilities': 'Lista de Facilidades', 'List Feature Layers': 'Listar Camadas de Recursos', 'List Flood Reports': 'Listar Relatórios de Inundações', 'List Groups': 'Listar grupos', 'List Groups/View Members': 'Listar Grupos/visualizar membros', 'List Hospitals': 'Listar de Hospitais', 'List Human Resources': 'Lista de Recursos Humanos', 'List Identities': 'Lista de Identidades', 'List Images': 'Lista de Imagens', 'List Impact Assessments': 'Lista de Avaliações De Impacto', 'List Impact Types': 'Lista de Tipos De Impacto', 'List Impacts': 'Lista de impactos', 'List Incident Reports': 'Lista de relatórios de incidentes', 'List Inventory Items': 'Listar ítens de inventário', 'List Item Categories': 'Listar categorias de ítens', 'List Item Packs': 'Lista pacotes de itens', 'List Items in Inventory': 'Lista de Itens no inventário', 'List Items': 'Listar itens', 'List Job Roles': 'Listar cargos', 'List Keys': 'Listar Chaves', 'List Kits': 'LISTAR Kits', 'List Layers': 'Listar Camadas', 'List Level 1 Assessments': 'Listar avaliações nível 1', 'List Level 1 assessments': 'Listar avaliação nível 1', 'List Level 2 Assessments': 'Listar avaliações nível 2', 'List Level 2 assessments': 'Listar avaliações nível 2', 'List Locations': 'Listar Localizações', 'List Log Entries': 'Listar as entradas de log', 'List Map Configurations': 'Listar configurações de mapa', 'List Markers': 'Listar marcadores', 'List Members': 'Lista de membros', 'List Memberships': 'Lista de associados', 'List Messages': 'Listar Mensagens', 'List Missing Persons': 'Lista de pessoas desaparecidas', 'List Missions': 'Listar Missões', 'List Need Types': 'Listar tipos de necessidades', 'List Needs': 'Lista de Necessidades', 'List Notes': 'Lista de Notas', 'List Offices': 'Lista de Escritórios', 'List Organizations': 'Listar Organizações', 'List Peers': 'LISTA DE PARES', 'List Personal Effects': 'Lista de objetos pessoais', 'List Persons': 'LISTA DE PESSOAS', 'List Photos': 'Lista de Fotos', 'List Population Statistics': 'Lista das Estatisticas da População', 'List Positions': 'Lista de Posições', 'List Problems': 'Lista de Problemas', 'List Projections': 'Lista de Projeções', 'List Projects': 'Listar Projectos', 'List Rapid Assessments': 'Listar Avaliações Rápidas', 'List Received Items': 'Listar Elementos Recebidos', 'List Received Shipments': 'Listar Carga Recebida', 'List Records': 'Listar Registros', 'List 
Registrations': 'Listar Registrações', 'List Reports': 'Relatórios de Listas', 'List Request Items': 'Pedido de Itens de lista', 'List Requests': 'LISTA DE PEDIDOS', 'List Resources': 'Listar Recursos', 'List Rivers': 'Lista de Rios', 'List Roles': 'Listar Funções', 'List Rooms': 'Listar Salas', 'List Scenarios': 'Listar cenários', 'List Sections': 'lista de Seções', 'List Sectors': 'Lista de Sectores', 'List Sent Items': 'Os itens da lista Enviada', 'List Sent Shipments': 'Embarques lista Enviada', 'List Service Profiles': 'Lista de serviços Perfis', 'List Settings': 'Lista de configurações', 'List Shelter Services': 'Lista de serviços de abrigo', 'List Shelter Types': 'Lista de Tipos De Abrigo', 'List Shelters': 'Lista de Abrigos', 'List Skill Equivalences': 'LISTA DE HABILIDADE Equivalências', 'List Skill Provisions': 'Listar suprimento de habilidades', 'List Skill Types': 'Lista de Tipos De Habilidade', 'List Skills': 'LISTA DE HABILIDADES', 'List Solutions': 'Listar Soluções', 'List Staff Members': 'Listar funcionários', 'List Staff Types': 'Listar Tipos De Equipe', 'List Staff': 'Listar Pessoal', 'List Status': 'Listar Status', 'List Subscriptions': 'Lista de Assinaturas', 'List Subsectors': 'Listar Subsetores', 'List Support Requests': 'Listar Pedidos de Suporte', 'List Survey Answers': 'Listar Respostas de Pesquisa', 'List Survey Questions': 'Listar Perguntas da Pesquisa', 'List Survey Sections': 'Listar Seções da Pesquisa', 'List Survey Series': 'Listar Séries de Pesquisa', 'List Survey Templates': 'Listar Modelos de Pesquisa', 'List Tasks': 'Lista de Tarefas', 'List Teams': 'Lista de Equipes', 'List Themes': 'Lista de Temas', 'List Tickets': 'lista de Bilhetes', 'List Tracks': 'Rastreia lista', 'List Trainings': 'Listar Treinamentos', 'List Units': 'Lista de Unidades', 'List Users': 'Mostrar usuários', 'List Volunteers': 'Mostrar Voluntários', 'List Warehouses': 'Mostrar Depósitos', 'List all': 'Mostrar tudo', 'List available Scenarios': 'Listar Cenários Disponíveis', 'List of Items': 'Lista de Itens', 'List of Missing Persons': 'Lista de pessoas desaparecidas', 'List of Peers': 'Lista de pares', 'List of Reports': 'Lista de Relatórios', 'List of Requests': 'Lista de Pedidos', 'List of Spreadsheets uploaded': 'Lista de Folhas de Cálculo transferidas', 'List of Spreadsheets': 'Lista de Folhas de Cálculo', 'List of Volunteers for this skill set': 'Lista de Voluntários para este conjunto de competências', 'List of Volunteers': 'Lista de Voluntários', 'List of addresses': 'Lista de endereços', 'List unidentified': 'Lista não identificada', 'List': 'Listar', 'List/Add': 'Lista/incluir', 'Lists "who is doing what & where". Allows relief agencies to coordinate their activities': 'Lista "quem está fazendo o que & aonde". 
Permite a agências humanitárias coordenar suas atividades', 'Live Help': 'Ajuda ao vivo', 'Livelihood': 'Subsistência', 'Load Cleaned Data into Database': 'Carregar Dados Limpos no Banco de Dados', 'Load Raw File into Grid': 'Carregar Arquivo Bruto na Grade', 'Loading': 'Carregando', 'Local Name': 'Nome local', 'Local Names': 'Nomes locais', 'Location 1': 'Local 1', 'Location 2': 'Local 2', 'Location Details': 'Detalhes da Localização', 'Location Hierarchy Level 0 Name': 'Nome do Nível 0 da Hierarquia de Locais', 'Location Hierarchy Level 1 Name': 'Nome do Nível 1 da Hierarquia de Locais', 'Location Hierarchy Level 2 Name': 'Nome do Nível 2 da Hierarquia de Locais', 'Location Hierarchy Level 3 Name': 'Nome do Nível 3 da Hierarquia de Locais', 'Location Hierarchy Level 4 Name': 'Nome do Nível 4 da Hierarquia de Locais', 'Location Hierarchy Level 5 Name': 'Nome do Nível 5 da Hierarquia de Locais', 'Location added': 'Local incluído', 'Location cannot be converted into a group.': 'Local não pode ser convertido em um grupo.', 'Location deleted': 'Local excluído', 'Location details': 'Detalhes do Local', 'Location group cannot be a parent.': 'Grupo de locais não pode ser um pai.', 'Location group cannot have a parent.': 'Grupo de locais não pode ter um pai.', 'Location groups can be used in the Regions menu.': 'Grupos de locais podem ser utilizados no menu Regiões.', 'Location groups may be used to filter what is shown on the map and in search results to only entities covered by locations in the group.': 'Grupos de locais podem ser utilizados para filtrar o que é mostrado no mapa e nos resultados da procura apenas as entidades abrangidas pelos locais do grupo.', 'Location updated': 'Local atualizado', 'Location': 'Localização', 'Location:': 'Localização:', 'Locations of this level need to have a parent of level': 'Locais deste nível precisam ter um pai de nível', 'Locations': 'Localizações', 'Lockdown': 'Bloqueio', 'Log Entry Details': 'Detalhes da Entrada de Log', 'Log entry added': 'Entrada de Log incluída', 'Log entry deleted': 'Entrada de Log Excluída', 'Log entry updated': 'Entrada de Log atualizada', 'Log': 'Registro', 'Login': 'Login', 'Logistics Management System': 'Sistema de Gestão de Logística', 'Logistics': 'Logística', 'Logo file %s missing!': 'Arquivo de logotipo %s ausente!', 'Logo': 'Logotipo', 'Logout': 'Sair', 'Long Text': 'Texto Longo', 'Longitude is West - East (sideways).': 'Longitude é Oeste - Leste (lateral).', 'Longitude is West-East (sideways).': 'Longitude é Oeste-Leste (lateral).', 'Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.': 'Longitude é zero no meridiano principal (Greenwich Mean Time) e é positivo para o leste, em toda a Europa e Ásia. Longitude é negativo para o oeste, através do Atlântico e das Américas.', 'Longitude is zero on the prime meridian (through Greenwich, United Kingdom) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.': 'Longitude é zero no meridiano principal (que passa por Greenwich, Reino Unido) e é positivo para o leste, em toda a Europa e Ásia. 
Longitude é negativo para o Ocidente, no outro lado do Atlântico e nas Américas.', 'Longitude of Map Center': 'Longitude do Centro do Mapa', 'Longitude of far eastern end of the region of interest.': 'Longitude longe do Oeste no final da região de interesse.', 'Longitude of far western end of the region of interest.': 'Longitude de oeste longínquo no final da Região de interesse.', 'Longitude should be between': 'Longitude deve estar entre', 'Looting': 'Saques', 'Lost Password': 'Senha Perdida', 'Lost': 'Perdido', 'Low': 'Baixo', 'Magnetic Storm': 'Tempestade magnética', 'Major Damage': 'Grandes danos', 'Major expenses': 'Despesas principais', 'Major outward damage': 'Danos exteriores principais', 'Make Commitment': 'Ter obrigação', 'Make New Commitment': 'Fazer Novo Compromisso', 'Make Request': 'Fazer Pedido', 'Make preparations per the <instruction>': 'Fazer Preparações por', 'Male': 'masculino', 'Manage Relief Item Catalogue': 'Gerenciar Catálogo de Item de Alívio', 'Manage Users & Roles': 'GERENCIAR Usuários & Funções', 'Manage Warehouses/Sites': 'GERENCIAR Armazéns/Sites', 'Manage Your Facilities': 'Gerenciar suas instalações', 'Manage requests for supplies, assets, staff or other resources. Matches against Inventories where supplies are requested.': 'Gerenciar pedidos de suprimentos, patrimônio, pessoal ou outros recursos. Corresponde aos estoques onde os suprimentos são solicitados.', 'Manage requests of hospitals for assistance.': 'GERENCIAR Pedidos de hospitais para obter assistência.', 'Manage volunteers by capturing their skills, availability and allocation': 'GERENCIAR voluntários por captura sua capacidade, Alocação e disponibilidade', 'Manage': 'Gerenciar', 'Manager': 'Gerente', 'Managing Office': 'Gerenciando Office', 'Mandatory. In GeoServer, this is the Layer Name. Within the WFS getCapabilities, this is the FeatureType Name part after the colon(:).': 'Obrigatório. Em GeoServer, este é o nome Da Camada. No getCapabilities WFS, este é o nome da parte FeatureType após os dois pontos (:).', 'Mandatory. The URL to access the service.': 'Obrigatório. 
A URL para acessar o serviço.', 'Manual Synchronization': 'Sincronização Manual', 'Many': 'Muitos', 'Map Center Latitude': 'Latitude do Centro do Mapa', 'Map Center Longitude': 'Longitude do centro do mapa', 'Map Configuration Details': 'Detalhes de configuração de mapa', 'Map Configuration added': 'Configuração de mapa incluída', 'Map Configuration deleted': 'Configuração de mapa excluída', 'Map Configuration removed': 'Configuração de mapa removida', 'Map Configuration updated': 'Configuração de mapa atualizada', 'Map Configuration': 'Configuração de Mapa', 'Map Configurations': 'Configurações de mapa', 'Map Height': 'Altura do Mapa', 'Map Service Catalog': 'Catálogo do serviço de mapas', 'Map Settings': 'Configurações do Mapa', 'Map Viewing Client': 'Cliente de visualização do mapa', 'Map Width': 'Largura do mapa', 'Map Zoom': 'Zoom do mapa', 'Map of Hospitals': 'Mapa de Hospitais', 'Map': 'Mapa', 'Marine Security': 'Segurança Marítima', 'Marital Status': 'Estado Civil', 'Marker Details': 'Detalhes do Marcador', 'Marker added': 'Marcador incluído', 'Marker deleted': 'Marcador removido', 'Marker updated': 'Marcador atualizado', 'Marker': 'Marcador', 'Markers': 'Marcadores', 'Master Message Log to process incoming reports & requests': 'Log de Mensagem Principal para processar relatórios de entrada e pedidos', 'Master Message Log': 'Log de Mensagens Principal', 'Match Percentage': 'Porcentagem de correspondência', 'Match Requests': 'Corresponder Pedidos', 'Match percentage indicates the % match between these two records': 'A porcentagem de correspondência indica o % de correspondência entre estes dois registros', 'Match?': 'Combina?', 'Matching Catalog Items': 'Itens de Catálogo Correspondentes', 'Matching Items': 'Itens correspondentes', 'Matching Records': 'Registros correspondentes', 'Matrix of Choices (Multiple Answers)': 'Matriz de Opções (Respostas Múltiplas)', 'Matrix of Choices (Only one answer)': 'Matriz de Opções (Apenas uma resposta)', 'Matrix of Text Fields': 'Matriz de campos de texto', 'Max Persons per Dwelling': 'Máx. Pessoas por Habitação', 'Maximum Location Latitude': 'Latitude Máxima do Local', 'Maximum Location Longitude': 'Longitude Máxima do Local', 'Medical and public health': 'Medicina e saúde pública', 'Medium': 'Médio', 'Megabytes per Month': 'Megabytes por mês', 'Member removed from Group': 'Membro removido do Grupo', 'Members': 'Membros', 'Membership Details': 'Detalhes de Associação', 'Membership updated': 'Associação atualizada', 'Membership': 'Associação', 'Memberships': 'Associações', 'Message Details': 'Detalhes da Mensagem', 'Message Variable': 'Mensagem variável', 'Message added': 'Mensagem incluída', 'Message deleted': 'Mensagem Excluída', 'Message field is required!': 'Campo mensagem é obrigatório!', 'Message updated': 'Mensagem atualizada', 'Message variable': 'Mensagem variável', 'Message': 'Mensagem', 'Messages': 'Mensagens', 'Messaging settings updated': 'Configurações de mensagens atualizadas', 'Messaging': 'Sistema de mensagens', 'Meteorite': 'Meteorito', 'Meteorological (inc. flood)': 'Meteorológico (inc. 
enchente)', 'Method used': 'Método utilizado', 'Middle Name': 'Nome do meio', 'Migrants or ethnic minorities': 'Imigrantes ou minorias étnicas', 'Military': 'Militares', 'Minimum Bounding Box': 'Caixa Delimitadora Mínima', 'Minimum Location Latitude': 'Latitude de Localização Mínima', 'Minimum Location Longitude': 'Longitude de Localização Mínima', 'Minimum shift time is 6 hours': 'O tempo mínimo de turno é de 6 horas', 'Minor Damage': 'Dano secundário', 'Minor/None': 'Secundária/Nenhum', 'Minorities participating in coping activities': 'Minorias participando em atividades de enfrentamento', 'Minute': 'Minuto', 'Minutes must be a number between 0 and 60': 'Minutos devem ser um número entre 0 e 60', 'Minutes per Month': 'Minutos por Mês', 'Minutes should be a number greater than 0 and less than 60': 'Minutos devem ser um número maior que 0 e menor que 60', 'Miscellaneous': 'Variados', 'Missing Person Details': 'Detalhes da pessoa desaparecida', 'Missing Person Registry': 'Registro de Pessoas Desaparecidas', 'Missing Person Reports': 'Relatórios de pessoas desaparecidas', 'Missing Person': 'Pessoa desaparecida', 'Missing Persons Registry': 'Registro de pessoas desaparecidas', 'Missing Persons Report': 'Relatório de pessoas desaparecidas', 'Missing Persons': 'Pessoas desaparecidas', 'Missing Report': 'Relatório de desaparecimento', 'Missing Senior Citizen': 'Cidadão sênior desaparecido', 'Missing Vulnerable Person': 'Pessoa vulnerável desaparecida', 'Missing': 'Desaparecido', 'Mission Details': 'Detalhes da Missão', 'Mission Record': 'Registro da Missão', 'Mission added': 'Missão incluída', 'Mission deleted': 'Missão excluída', 'Mission updated': 'Missão atualizada', 'Missions': 'Missões', 'Mobile Basic Assessment': 'Avaliação Básica Móvel', 'Mobile Phone': 'Telefone celular', 'Mobile': 'Telefone celular', 'Mode': 'Modo', 'Model/Type': 'Modelo/Tipo', 'Modem Settings': 'Configurações do Modem', 'Modem settings updated': 'Configurações de modem atualizadas', 'Moderate': 'Moderado', 'Moderator': 'Moderador', 'Modify Information on groups and individuals': 'Modificar Informações sobre grupos e pessoas', 'Modifying data in spreadsheet before importing it to the database': 'Modificando dados na planilha antes de importá-los para o banco de dados', 'Module disabled!': 'Módulo desativado!', 'Module provides access to information on current Flood Levels.': 'O módulo fornece acesso a informações sobre os níveis atuais de enchente.', 'Module': 'Módulo', 'Monday': 'segunda-feira', 'Monthly Cost': 'Custo mensal', 'Monthly Salary': 'Salário mensal', 'Months': 'meses', 'Morgue Status': 'Situação do necrotério', 'Morgue Units Available': 'Unidades disponíveis no necrotério', 'Mosque': 'Mesquita', 'Motorcycle': 'Motocicleta', 'Moustache': 'Bigode', 'MultiPolygon': 'MultiPolígono', 'Multiple Choice (Multiple Answers)': 'Múltipla escolha (Várias Respostas)', 'Multiple Choice (Only One Answer)': 'Múltipla Escolha (Apenas uma resposta)', 'Multiple Matches': 'Múltiplas Correspondências', 'Multiple Text Fields': 'Vários campos de texto', 'Multiple': 'Múltiplos', 'Muslim': 'Muçulmano', 'Must a location have a parent location?': 'Um local deve ter um local pai?', 'My Current function': 'Minha função Atual', 'My Tasks': 'Minhas tarefas', 'N/A': 'n/d', 'NO': 'NÃO', 'NZSEE Level 1': 'NZSEE Nível 1', 'NZSEE Level 2': 'NZSEE Nível 2', 'Name and/or ID': 'Nome e/ou ID', 'Name of the file (& optional sub-path) located in static which should be used for the background of the header.': 'Nome do arquivo (e sub-caminho opcional) localizado em static que deve ser 
utilizado como plano de fundo do cabeçalho.', 'Name of the file (& optional sub-path) located in static which should be used for the top-left image.': 'Nome do arquivo (e sub-caminho opcional) localizado em static que deve ser utilizado para a imagem superior esquerda.', 'Name of the file (& optional sub-path) located in views which should be used for footer.': 'Nome do arquivo (e sub-caminho opcional) localizado nas visualizações que deve ser utilizado no rodapé.', 'Name of the person in local language and script (optional).': 'Nome da pessoa no idioma local e script local (opcional).', 'Name or Job Title': 'Nome ou cargo', 'Name': 'Nome', 'Name, Org and/or ID': 'Nome, organização e/ou ID.', 'Name/Model/Type': 'Nome/Modelo/Tipo', 'Names can be added in multiple languages': 'Nomes podem ser adicionados em múltiplos idiomas', 'National ID Card': 'Cartão de ID Nacional', 'National NGO': 'ONG Nacional', 'National': 'Nacional', 'Nationality of the person.': 'Nacionalidade da pessoa.', 'Nationality': 'Nacionalidade', 'Nautical Accident': 'Acidente Náutico', 'Nautical Hijacking': 'Sequestro Náutico', 'Need Type Details': 'Detalhes do Tipo de Necessidade', 'Need Type added': 'Tipo de Necessidade incluído', 'Need Type deleted': 'Tipo de Necessidade excluído', 'Need Type updated': 'Tipo de Necessidade atualizado', 'Need Type': 'Tipo de Necessidade', 'Need Types': 'Tipos de necessidade', 'Need added': 'Necessidade incluída', 'Need deleted': 'Necessidade excluída', 'Need to be logged-in to be able to submit assessments': 'Precisa estar conectado ao programa para conseguir submeter avaliações', 'Need to configure Twitter Authentication': 'Precisa configurar a autenticação do Twitter', 'Need to specify a Budget!': 'É necessário especificar um orçamento!', 'Need to specify a Kit!': 'É necessário especificar um Kit!', 'Need to specify a Resource!': 'É necessário especificar um recurso!', 'Need to specify a bundle!': 'É necessário especificar um pacote!', 'Need to specify a group!': 'É necessário especificar um grupo!', 'Need to specify a location to search for.': 'É necessário especificar um local para procurar.', 'Need to specify a role!': 'Será necessário especificar um papel!', 'Need to specify a table!': 'Será necessário especificar uma tabela!', 'Need to specify a user!': 'Será necessário especificar um usuário!', 'Need updated': 'Necessidade atualizada', 'Needs Details': 'Detalhes das Necessidades', 'Needs Maintenance': 'Necessita Manutenção', 'Needs to reduce vulnerability to violence': 'Necessidade de reduzir a vulnerabilidade à violência.', 'Needs': 'Necessidades', 'Negative Flow Isolation': 'Isolamento de Fluxo Negativo', 'Neighborhood': 'Bairro', 'Neighbouring building hazard': 'Risco de edifício vizinho', 'Neonatology': 'Neonatologia', 'Network': 'Rede', 'Neurology': 'Neurologia', 'New Assessment reported from': 'Nova Avaliação relatada a partir de', 'New Certificate': 'Novo Certificado', 'New Checklist': 'Nova Verificação', 'New Entry': 'Nova Entrada', 'New Event': 'Novo Evento', 'New Item Category': 'Nova Categoria de Item', 'New Job Role': 'Novo Papel', 'New Location Group': 'Novo Grupo de Locais', 'New Location': 'Novo Local', 'New Peer': 'Novo Par', 'New Record': 'Novo Registro', 'New Request': 'Nova Requisição', 'New Scenario': 'Novo Cenário', 'New Skill': 'Nova Habilidade', 'New Solution Choice': 'Escolha nova solução', 'New Staff Member': 'Novo membro da equipe', 'New Support Request': 'Novo pedido de suporte', 'New Synchronization Peer': 'Novo par de sincronização', 'New Team': 'Nova equipe', 'New 
Training Course': 'Novo Curso de Treinamento', 'New Volunteer': 'Novo Voluntário', 'New cases in the past 24h': 'Novos casos nas últimas 24H', 'New': 'Novo(a)', 'News': 'Notícias', 'Next': 'Seguinte', 'No Activities Found': 'Não há actividades', 'No Alternative Items currently registered': 'Nenhum item alternativo atualmente registrado', 'No Assessment Summaries currently registered': 'Nenhum Sumário De Avaliação actualmente registrado', 'No Assessments currently registered': 'Nenhuma Avaliação actualmente registrada', 'No Asset Assignments currently registered': 'Nenhum ativo designado encontra-se atualmente registrado', 'No Assets currently registered in this event': 'Sem ativos atualmente registrados neste evento', 'No Assets currently registered in this scenario': 'Sem ativos atualmente registrados neste cenário', 'No Assets currently registered': 'Sem Ativos registrados atualmente', 'No Baseline Types currently registered': 'Nenhum tipo de base line registrado atualmente', 'No Baselines currently registered': 'Nenhuma linha base registrada atualmente', 'No Brands currently registered': 'Sem Marcas atualmente registrado', 'No Budgets currently registered': 'Nenhum Dos Orçamentos registrados atualmente', 'No Bundles currently registered': 'Nenhum pacote atualmente registrado', 'No Camp Services currently registered': 'Nenhum serviço de acampamento atualmente registrado', 'No Camp Types currently registered': 'Nenhum tipo de acampamento atualmente registrado', 'No Camps currently registered': 'Sem Acampamentos atualmente registrados', 'No Catalog Items currently registered': 'Nenhum itens do catálogo registrado atualmente', 'No Catalogs currently registered': 'Nenhum catálogo atualmente registrado', 'No Checklist available': 'Checklist não disponível', 'No Cluster Subsectors currently registered': 'Nenhum sub-setor de cluster registrado atualmente', 'No Clusters currently registered': 'Nenhum Cluster registrado atualmente', 'No Commitment Items currently registered': 'Nenhum Item de Compromisso registrado atualmente', 'No Commitments': 'Sem Compromissos', 'No Credentials currently set': 'Nenhuma credencial atualmente configurada', 'No Details currently registered': 'Nenhum detalhes registrado atualmente', 'No Documents found': 'Nenhum Documento encontrado', 'No Donors currently registered': 'Sem doadores registrados atualmente', 'No Events currently registered': 'Não há eventos atualmente registrados', 'No Facilities currently registered in this event': 'Não há Recursos atualmente registrado nesse evento', 'No Facilities currently registered in this scenario': 'Não há recursos atualmente registrados neste cenário', 'No Feature Layers currently defined': 'Nenhuma Camada de Componentes atualmente definidos', 'No Flood Reports currently registered': 'Nenhum relatório de Inundação atualmente registrado', 'No Groups currently defined': 'Não há Grupos definidos atualmente', 'No Groups currently registered': 'Nenhum Grupo atualmente registrado', 'No Hospitals currently registered': 'Nenhum hospital atualmente registrado', 'No Human Resources currently registered in this event': 'Nao há recursos humanos atualmente registrados nesse evento', 'No Human Resources currently registered in this scenario': 'Sem recursos humanos atualmente registrados neste cenário', 'No Identification Report Available': 'Nenhum Relatório de Identificação Disponível', 'No Identities currently registered': 'Nenhuma Identidade atualmente registrada', 'No Image': 'Nenhuma Imagem', 'No Images currently registered': 'Nenhuma 
Imagem atualmente registrada', 'No Impact Types currently registered': 'Nenhum tipo de impacto atualmente registrado', 'No Impacts currently registered': 'Nenhum Impacto atualmente registrado', 'No Incident Reports currently registered': 'Nenhum relatório de incidente registrado atualmente', 'No Incoming Shipments': 'Nenhum Embarque de Entrada', 'No Inventory Items currently registered': 'Nenhum Item de Inventário registrado atualmente', 'No Item Categories currently registered': 'Nenhuma Categoria de Item atualmente registrada', 'No Item Packs currently registered': 'Nenhum Pacote de Itens atualmente registrado', 'No Items currently registered in this Inventory': 'Sem itens registrados atualmente neste inventário', 'No Items currently registered': 'Nenhum item registrado atualmente', 'No Keys currently defined': 'Nenhuma chave definida no momento', 'No Kits currently registered': 'Nenhum kit registrado no momento', 'No Level 1 Assessments currently registered': 'Nenhuma avaliação nível 1 registrada no momento', 'No Level 2 Assessments currently registered': 'Nenhuma avaliação nível 2 registrada no momento', 'No Locations currently available': 'Locais Não disponíveis atualmente', 'No Locations currently registered': 'Locais Não registrados atualmente', 'No Map Configurations currently defined': 'Nenhuma configuração de Mapa atualmente definida', 'No Map Configurations currently registered in this event': 'Nenhuma configuração de Mapa está atualmente registrada nesse evento', 'No Map Configurations currently registered in this scenario': 'Nenhuma configuração de Mapa está atualmente registrada neste cenário', 'No Markers currently available': 'Não há marcadores atualmente disponíveis', 'No Match': 'Sem correspondência', 'No Matching Catalog Items': 'Nenhum Item de Catálogo Correspondente', 'No Matching Items': 'Sem itens correspondentes', 'No Matching Records': 'Sem registros correspondentes', 'No Members currently registered': 'Sem membros registrados atualmente', 'No Memberships currently defined': 'Sem Associações definidas atualmente', 'No Messages currently in Outbox': 'Nenhuma mensagem na Caixa de saída', 'No Need Types currently registered': 'Nenhum Tipo de Necessidade atualmente registrado', 'No Needs currently registered': 'Nenhuma Necessidade atualmente registrada', 'No Offices currently registered': 'Nenhum Escritório registrado atualmente', 'No Offices found!': 'Nenhum Escritório localizado!', 'No Organizations currently registered': 'Nenhuma Organização atualmente registrada', 'No People currently registered in this camp': 'Nenhuma pessoa registrada atualmente neste acampamento', 'No People currently registered in this shelter': 'Nenhuma pessoa registrada atualmente neste abrigo', 'No Persons currently registered': 'Nenhuma pessoa atualmente registrada', 'No Persons currently reported missing': 'Nenhuma pessoa reportada atualmente como desaparecida', 'No Persons found': 'Nenhuma pessoa localizada', 'No Photos found': 'Nenhuma Foto localizada', 'No Picture': 'Nenhuma imagem', 'No Population Statistics currently registered': 'Nenhuma estatística populacional atualmente registrada', 'No Presence Log Entries currently registered': 'Nenhuma entrada no log de Presença atualmente registrada', 'No Problems currently defined': 'Nenhum Problema atualmente definido', 'No Projections currently defined': 'Nenhuma projeção atualmente definida', 'No Projects currently registered': 'Nenhum projeto atualmente registrado', 'No Rapid Assessments currently registered': 'Nenhuma Avaliação Rápida atualmente 
registrada', 'No Received Items currently registered': 'Nenhum item recebido atualmente registrado', 'No Received Shipments': 'Nenhum Carregamento Recebido', 'No Records currently available': 'Registros atualmente não disponíveis', 'No Request Items currently registered': 'Não há itens de Pedidos registrados', 'No Requests': 'Não há pedidos', 'No Rivers currently registered': 'Nenhum Rio atualmente registrado', 'No Roles currently defined': 'Nenhuma função atualmente definida', 'No Rooms currently registered': 'Nenhuma sala atualmente registrada', 'No Scenarios currently registered': 'Nenhum cenário atualmente registrado', 'No Sections currently registered': 'Sem seções atualmente registradas', 'No Sectors currently registered': 'Nenhum setor atualmente registrado', 'No Sent Items currently registered': 'Nenhum item Enviado atualmente registrado', 'No Sent Shipments': 'Nenhum carregamento enviado', 'No Settings currently defined': 'Nenhuma configuração atualmente definida', 'No Shelter Services currently registered': 'Nenhum serviço de abrigo atualmente registrado', 'No Shelter Types currently registered': 'Nenhum tipo de abrigo registrado atualmente', 'No Shelters currently registered': 'Nenhum abrigo atualmente registrado', 'No Solutions currently defined': 'Sem Soluções actualmente definidas', 'No Staff Types currently registered': 'Sem Tipos de Funcionários actualmente registrados', 'No Staff currently registered': 'Sem Funcionários actualmente registrados', 'No Subscription available': 'Nenhuma assinatura disponível', 'No Subsectors currently registered': 'Nenhum subsetor atualmente registrado', 'No Support Requests currently registered': 'Nenhum pedido de suporte atualmente registrado', 'No Survey Answers currently entered.': 'Nenhuma resposta de pesquisa atualmente inscrita.', 'No Survey Answers currently registered': 'Nenhuma resposta a pesquisa atualmente registrada', 'No Survey Questions currently registered': 'Nenhuma pergunta de pesquisa atualmente registrada', 'No Survey Sections currently registered': 'Nenhuma seção de pesquisa atualmente registrada', 'No Survey Series currently registered': 'Nenhuma série de pesquisa atualmente registrada', 'No Survey Template currently registered': 'Nenhum Modelo de Pesquisa atualmente registrado', 'No Tasks with Location Data': 'Nenhuma tarefa com dados de localização', 'No Teams currently registered': 'Nenhuma equipe atualmente registrada', 'No Themes currently defined': 'Nenhum Tema atualmente definido', 'No Tickets currently registered': 'Nenhum bilhete atualmente registrado', 'No Tracks currently available': 'Nenhum rastreamento atualmente disponível', 'No Users currently registered': 'Nenhum Usuário actualmente registrado', 'No Volunteers currently registered': 'Nenhum Voluntário actualmente registrado', 'No Warehouses currently registered': 'Nenhum Armazém actualmente registrado', 'No access at all': 'Nenhum acesso', 'No access to this record!': 'Não há acesso a esta entrada!', 'No action recommended': 'Nenhuma acção recomendada', 'No conflicts logged': 'Nenhum conflito registrado', 'No contact information available': 'Nenhuma informação de contato disponível', 'No contacts currently registered': 'Nenhum contato atualmente registrado', 'No data in this table - cannot create PDF!': 'Nenhum dado nesta tabela - PDF não pode ser criado!', 'No databases in this application': 'Nenhum banco de dados neste aplicativo', 'No dead body reports available': 'Nenhum relatório de óbito disponível', 'No entries found': 'Nenhuma entrada encontrada', 'No 
entries matching the query': 'Nenhuma entrada correspondente à consulta', 'No entry available': 'Nenhuma entrada disponível', 'No location known for this person': 'Nenhum local conhecido para essa pessoa', 'No locations found for members of this team': 'Nenhum local encontrado para membros desta equipe', 'No log entries matching the query': 'Nenhuma entrada de log correspondente à consulta', 'No messages in the system': 'Nenhuma mensagem no sistema', 'No notes available': 'Notas não disponíveis', 'No peers currently registered': 'Não há pares registrados atualmente', 'No pending registrations found': 'Não foram encontrados registros pendentes', 'No pending registrations matching the query': 'Não foram encontrados registros pendentes correspondentes à consulta efetuada', 'No person record found for current user.': 'Nenhum registro de pessoa localizado para o usuário atual.', 'No problem group defined yet': 'Nenhum grupo de problemas definido ainda', 'No records matching the query': 'Sem registros correspondentes à consulta', 'No report available.': 'Nenhum Relatório disponível.', 'No reports available.': 'Não há relatórios disponíveis.', 'No reports currently available': 'Não há relatórios disponíveis actualmente', 'No requests found': 'Não foram encontrados pedidos', 'No resources currently reported': 'Recursos não reportados actualmente', 'No service profile available': 'Nenhum perfil de serviço disponível', 'No skills currently set': 'Não há habilidades atualmente configuradas', 'No staff members currently registered': 'Nenhum membro da equipe atualmente registrado', 'No staff or volunteers currently registered': 'Nenhum funcionário ou voluntário atualmente registrado', 'No status information available': 'Nenhuma informação de status disponível', 'No synchronization': 'Sem Sincronização', 'No tasks currently registered': 'Nenhuma tarefa atualmente registrada', 'No template found!': 'Nenhum modelo localizado!', 'No units currently registered': 'Nenhuma unidade actualmente registrada', 'No volunteer availability registered': 'Sem disponibilidade de voluntário registrada', 'No volunteers currently registered': 'Nenhum Voluntário actualmente registrado', 'No': 'Não', 'Non-structural Hazards': 'Riscos não-estruturais', 'None (no such record)': 'Nenhum (sem registro)', 'None': 'Nenhum', 'Noodles': 'Macarrão', 'Not Applicable': 'Não se aplica', 'Not Authorised!': 'Não Autorizado!', 'Not Possible': 'Impossível', 'Not Set': 'Não configurado', 'Not Authorized': 'Não autorizado', 'Not installed or incorrectly configured.': 'Não instalado ou configurado incorretamente.', 'Not yet a Member of any Group': 'Sem Associações registradas atualmente', 'Note Details': 'Detalhes da Nota', 'Note Status': 'Status da Nota', 'Note Type': 'Tipo de nota', 'Note added': 'Nota Incluída', 'Note deleted': 'Nota Excluída', 'Note that this list only shows active volunteers. To see all people registered in the system, search from this screen instead': 'Observe que essa lista mostra apenas voluntários ativos. 
Para ver todas as pessoas registradas no sistema, procure a partir desta tela', 'Note updated': 'Nota atualizada', 'Note': 'Nota', 'Notes': 'Observações', 'Notice to Airmen': 'Aviso ao piloto', 'Number of Columns': 'Número de colunas', 'Number of Patients': 'Número de Pacientes', 'Number of Rows': 'Número de Linhas', 'Number of additional beds of that type expected to become available in this unit within the next 24 hours.': 'Número de camas adicionais desse tipo que se espera ficarem disponíveis nesta unidade nas próximas 24 horas.', 'Number of alternative places for studying': 'Número de locais alternativos para estudar', 'Number of available/vacant beds of that type in this unit at the time of reporting.': 'Número de camas disponíveis/livres desse tipo nesta unidade no momento do relatório.', 'Number of deaths during the past 24 hours.': 'Número de mortes durante as últimas 24 horas.', 'Number of discharged patients during the past 24 hours.': 'Número de pacientes que receberam alta durante as últimas 24 horas.', 'Number of doctors': 'Número de médicos', 'Number of in-patients at the time of reporting.': 'Número de pacientes internados no momento do relatório.', 'Number of newly admitted patients during the past 24 hours.': 'Número de pacientes admitidos durante as últimas 24 horas.', 'Number of non-medical staff': 'Número de funcionários não-médicos', 'Number of nurses': 'Número de enfermeiras', 'Number of private schools': 'Número de escolas privadas', 'Number of public schools': 'Número de escolas públicas', 'Number of religious schools': 'Número de escolas religiosas', 'Number of residential units not habitable': 'Número de unidades residenciais não habitáveis', 'Number of residential units': 'Número de unidades residenciais', 'Number of vacant/available beds in this hospital. Automatically updated from daily reports.': 'Número de leitos vagos/disponíveis nesse hospital. Atualizado automaticamente a partir de relatórios diários.', 'Number of vacant/available units to which victims can be transported immediately.': 'Número de unidades vagas/disponíveis em que vítimas podem ser transportadas imediatamente.', 'Number or Label on the identification tag this person is wearing (if any).': 'Número ou código na etiqueta de identificação que a pessoa está usando (se houver).', 'Number or code used to mark the place of find, e.g. 
flag code, grid coordinates, site reference number or similar (if available)': 'Número ou código utilizado para marcar o local de localização, por exemplo, código de bandeira, grade de coordenadas, número de referência do site ou similar (se disponível)', 'Number': 'Número', 'Number/Percentage of affected population that is Female & Aged 0-5': 'Número/percentagem da população afetada que é Mulher com 0-5 anos', 'Number/Percentage of affected population that is Female & Aged 13-17': 'Número/percentagem da população afetada que é Mulher com 13-17 anos', 'Number/Percentage of affected population that is Female & Aged 18-25': 'Número/percentagem da população afetada que é Mulher com 18-25 anos', 'Number/Percentage of affected population that is Female & Aged 26-60': 'Número/percentagem da população afetada que é Mulher com 26-60 anos', 'Number/Percentage of affected population that is Female & Aged 6-12': 'Número/percentagem da população afetada que é Mulher com 6-12 anos', 'Number/Percentage of affected population that is Female & Aged 61+': 'Número/percentagem da população afetada que é Mulher > 61 anos', 'Number/Percentage of affected population that is Male & Aged 0-5': 'Número/percentagem da população afetada que é Homem com 0-5 anos', 'Number/Percentage of affected population that is Male & Aged 13-17': 'Número/percentagem da população afetada que é Homem com 13-17 anos', 'Number/Percentage of affected population that is Male & Aged 18-25': 'Número/percentagem da população afetada que é Homem com 18-25 anos', 'Number/Percentage of affected population that is Male & Aged 26-60': 'Número/percentagem da população afetada que é Homem com 26-60 anos', 'Number/Percentage of affected population that is Male & Aged 6-12': 'Número/percentagem da população afetada que é Homem com 6-12 anos', 'Number/Percentage of affected population that is Male & Aged 61+': 'Número/percentagem da população afetada que é Homem > 61 anos', 'Nursery Beds': 'Camas de berçário', 'Nutrition problems': 'Problemas nutricionais', 'Nutrition': 'Nutrição', 'OR Reason': 'Motivo das Salas de Cirurgia', 'OR Status Reason': 'Motivo do Status das Salas de Cirurgia', 'OR Status': 'Status das Salas de Cirurgia', 'OR a site OR a location': 'OU um site OU um local', 'Observer': 'Observador', 'Obsolete': 'Obsoleto', 'Obstetrics/Gynecology': 'Obstetrícia/Ginecologia', 'Office Address': 'Endereço do escritório', 'Office Details': 'Detalhes do Escritório', 'Office Phone': 'Telefone do escritório', 'Office added': 'Escritório incluído', 'Office deleted': 'Escritório excluído', 'Office updated': 'Escritório atualizado', 'Office': 'Escritório', 'Offices & Warehouses': 'Escritórios & Armazéns', 'Offices': 'Escritórios', 'Offline Sync (from USB/File Backup)': 'Sincronização Off-line (a partir de USB/arquivo de Backup)', 'Offline Sync': 'Sincronização Off-line', 'Older people as primary caregivers of children': 'Pessoas mais velhas como responsáveis primárias de crianças', 'Older people in care homes': 'Pessoas mais velhas em casas de cuidados', 'Older people participating in coping activities': 'Pessoas idosas participando em atividades de enfrentamento', 'Older person (>60 yrs)': 'Idosos (>60 anos)', 'On by default? (only applicable to Overlays)': 'Ativado por padrão? (apenas aplicável a Sobreposições)', 'On by default?': 'Ativado por padrão?', 'One Time Cost': 'Custo Único', 'One time cost': 'Custo único', 'One-time costs': 'Custos únicos', 'One-time': 'Único', 'Oops! Something went wrong...': 'Oops! Algo deu errado...', 'Oops! something went wrong on our side.': 'Oops! 
algo deu errado do nosso lado.', 'Opacity (1 for opaque, 0 for fully-transparent)': 'Opacidade (1 para opaco, 0 para totalmente transparente)', 'Open area': 'Área aberta', 'Open recent': 'Abrir recente', 'Open': 'Abrir', 'Operating Rooms': 'Salas de Cirurgia', 'Optional link to an Incident which this Assessment was triggered by.': 'Link opcional para o incidente pelo qual esta avaliação foi desencadeada.', 'Optional. If you wish to style the features based on values of an attribute, select the attribute to use here.': 'Opcional. Se você deseja estilizar os componentes com base nos valores de um atributo, selecione aqui o atributo a ser utilizado.', 'Optional. In GeoServer, this is the Workspace Namespace URI (not the name!). Within the WFS getCapabilities, this is the FeatureType Name part before the colon(:).': 'Opcional. Em GeoServer, este é o URI do Namespace do Workspace (não o nome!). No getCapabilities do WFS, esta é a parte do Nome do FeatureType antes dos dois pontos (:).', 'Optional. In GeoServer, this is the Workspace Namespace URI. Within the WFS getCapabilities, this is the FeatureType Name part before the colon(:).': 'Opcional. Em GeoServer, este é o URI do Namespace do Workspace. No getCapabilities do WFS, esta é a parte do Nome do FeatureType antes dos dois pontos (:).', 'Optional. The name of an element whose contents should be a URL of an Image file put into Popups.': 'Opcional. O nome de um elemento cujo conteúdo deve ser uma URL de um arquivo de imagem para Popups.', 'Optional. The name of an element whose contents should be put into Popups.': 'Opcional. O nome de um elemento cujo conteúdo deve ser adicionado em Popups.', 'Optional. The name of the schema. In Geoserver this has the form http://host_name/geoserver/wfs/DescribeFeatureType?version=1.1.0&;typename=workspace_name:layer_name.': 'Opcional. O nome do esquema. 
Em Geoserver isto tem o formato http://host_name/geoserver/wfs/DescribeFeatureType?version=1.1.0&;typename=workspace_name:layer_name.', 'Options': 'Opções', 'Organization Details': 'Detalhes da Organização', 'Organization Registry': 'Registro de Organização', 'Organization added': 'Organização incluída', 'Organization deleted': 'Organização excluída', 'Organization updated': 'Organização atualizada', 'Organization': 'Organização', 'Organizations': 'Organizações', 'Origin of the separated children': 'Origem das crianças separadas', 'Origin': 'Origem', 'Other (describe)': 'Outros (descreva)', 'Other (specify)': 'Outros motivos (especifique)', 'Other Evidence': 'Outras evidências', 'Other Faucet/Piped Water': 'Outras Torneiras/Água Encanada', 'Other Isolation': 'Outro Isolamento', 'Other Name': 'Outro nome', 'Other activities of boys 13-17yrs before disaster': 'Outras atividades de garotos 13-17 anos antes do desastre', 'Other activities of boys 13-17yrs': 'Outras atividades de garotos 13-17anos', 'Other activities of boys <12yrs before disaster': 'Outras atividades de garotos <12anos antes do desastre', 'Other activities of boys <12yrs': 'Outras atividades de garotos <12 anos', 'Other activities of girls 13-17yrs before disaster': 'Outras atividades de meninas 13-17anos antes do desastre', 'Other activities of girls 13-17yrs': 'Outras atividades de meninas 13-17anos', 'Other activities of girls<12yrs before disaster': 'Outras atividades de garotas<12anos antes do desastre', 'Other activities of girls<12yrs': 'Outras atividades de garotas<12anos', 'Other alternative infant nutrition in use': 'Outra nutrição infantil alternativa em uso', 'Other alternative places for study': 'Outros locais alternativos para estudo', 'Other assistance needed': 'Outra assistência necessária', 'Other assistance, Rank': 'Outra assistência, Classificação', 'Other current health problems, adults': 'Outros problemas actuais de saúde, adultos', 'Other current health problems, children': 'Outros problemas actuais de saúde, crianças', 'Other events': 'Outros eventos', 'Other factors affecting school attendance': 'Outros fatores que afetam a frequência escolar', 'Other major expenses': 'Outras despesas importantes', 'Other non-food items': 'Outros itens não alimentícios', 'Other recommendations': 'Outras recomendações', 'Other residential': 'Outro residencial', 'Other school assistance received': 'Outra assistência escolar recebida', 'Other school assistance, details': 'Outra assistência escolar, detalhes', 'Other school assistance, source': 'Outra assistência escolar, origem', 'Other settings can only be set by editing a file on the server': 'Outras configurações só podem ser definidas editando um arquivo no servidor', 'Other side dishes in stock': 'Outros acompanhamentos em estoque', 'Other types of water storage containers': 'Outros tipos de recipientes de armazenamento de água', 'Other ways to obtain food': 'Outras maneiras de obter alimentos', 'Other': 'Outro', 'Outbound Mail settings are configured in models/000_config.py.': 'Definições de correio de saída são configuradas em models/000_config.py.', 'Outbox': 'Caixa de Saída', 'Outgoing SMS Handler': 'Manipulador de SMS de Saída', 'Outgoing SMS handler': 'Manipulador de SMS de saída', 'Overall Hazards': 'Riscos gerais', 'Overhead falling hazard': 'Risco de queda de objetos suspensos', 'Overland Flow Flood': 'Enchente por Escoamento Superficial', 'Owned Resources': 'Recursos Próprios', 'PAHO UID': 'OPS UID', 'PIN number': 'Número PIN', 'PIN': 'PIN', 'PL Women': 'Mulheres PL', 
'Pack': 'Pacote', 'Packs': 'Pacotes', 'Parameters': 'Parâmetros', 'Parapets, ornamentation': 'Parapeitos, ornamentação', 'Parent Office': 'Escritório Principal', 'Parent needs to be of the correct level': 'Pai precisa ser do nível correto', 'Parent needs to be set for locations of level': 'Principal precisa ser configurado para locais de nível', 'Parent needs to be set': 'Principal precisa ser configurado', 'Parent': 'Pai', 'Parents/Caregivers missing children': 'Pais/cuidadores de crianças desaparecidas', 'Partial': 'Parcial', 'Participant': 'Participante', 'Pashto': 'Pachto', 'Pass': 'Passou', 'Passport': 'Passaporte', 'Password': 'Senha', 'Path': 'Caminho', 'Pathology': 'Patologia', 'Patients': 'Pacientes', 'Pediatric ICU': 'UTI Pediátrica', 'Pediatric Psychiatric': 'Psiquiatria Pediátrica', 'Pediatrics': 'Pediatria', 'Peer Details': 'Detalhes do Par', 'Peer Registration Details': 'Detalhes de Registro do Par', 'Peer Registration Request': 'Pedido de Registro do Par', 'Peer Registration': 'Registro de par', 'Peer Type': 'Tipo de Par', 'Peer UID': 'UID do Par', 'Peer added': 'Par adicionado', 'Peer deleted': 'Par excluído', 'Peer not allowed to push': 'Par não permitido para envio', 'Peer registration request added': 'Pedido de registro do par adicionado', 'Peer registration request deleted': 'Pedido de registro do par excluído', 'Peer registration request updated': 'Pedido de registro do par atualizado', 'Peer updated': 'Par atualizado', 'Peer': 'Par', 'Peers': 'Pares', 'Pending Requests': 'Pedidos Pendentes', 'Pending': 'Pendente', 'People Needing Food': 'Pessoas precisando de alimento', 'People Needing Shelter': 'Pessoas precisando de abrigo', 'People Needing Water': 'Pessoas precisando de água', 'People Trapped': 'Pessoas presas', 'People': 'Pessoas', 'Performance Rating': 'Classificação de Desempenho', 'Person 1': 'Pessoa 1', 'Person 1, Person 2 are the potentially duplicate records': 'Pessoa 1, Pessoa 2 são os registros potencialmente duplicados', 'Person 2': 'Pessoa 2', 'Person De-duplicator': 'Anti-duplicador de Pessoas', 'Person Details': 'Detalhes Pessoais', 'Person Finder': 'Buscador de pessoas', 'Person Registry': 'Registro de Pessoas', 'Person added to Group': 'Membro do grupo incluído', 'Person added to Team': 'Membro da equipe incluído', 'Person added': 'Pessoa Incluída', 'Person deleted': 'Pessoa removida', 'Person details updated': 'Detalhes pessoais actualizados', 'Person interviewed': 'Pessoa entrevistada', 'Person missing': 'Pessoa desaparecida', 'Person reporting': 'Pessoa relatando', 'Person who has actually seen the person/group.': 'Pessoa que tenha realmente visto a pessoa/Grupo.', 'Person': 'Pessoa', 'Person/Group': 'Pessoa/Grupo', 'Personal Data': 'Dados pessoais', 'Personal Effects Details': 'Detalhes dos Efeitos Pessoais', 'Personal Effects': 'Efeitos pessoais', 'Personal Map': 'Mapa Pessoal', 'Personal Profile': 'Perfil pessoal', 'Personal impact of disaster': 'Impacto pessoal do desastre', 'Personal': 'Pessoal', 'Persons in institutions': 'Pessoas em instituições', 'Persons with disability (mental)': 'Pessoas com deficiência (mental)', 'Persons with disability (physical)': 'Pessoas com deficiência (física)', 'Persons': 'Pessoas', 'Phone 1': 'Telefone 1', 'Phone 2': 'Telefone 2', 'Phone': 'Telefone', 'Phone/Business': 'Telefone comercial', 'Phone/Emergency': 'Telefone de emergência', 'Phone/Exchange (Switchboard)': 'Telefone/Central Telefônica (Mesa)', 'Phone/Exchange': 'Telefone/Central Telefônica', 'Photo Details': 'Detalhes da Foto', 'Photo Taken?': 
'Foto tirada?', 'Photo added': 'Foto adicionada', 'Photo deleted': 'Foto excluída', 'Photo updated': 'Foto atualizada', 'Photo': 'Foto', 'Photograph': 'Fotografia', 'Photos': 'Fotos', 'Physical Description': 'Descrição física', 'Physical Safety': 'Segurança Física', 'Picture upload and finger print upload facility': 'Recurso de upload de imagem e de impressão digital', 'Picture': 'Imagem', 'Place of Recovery': 'Local de recuperação', 'Place': 'Local', 'Places for defecation': 'Locais para a defecação', 'Places the children have been sent to': 'Lugares para os quais as crianças foram enviadas', 'Planner': 'Planejador', 'Playing': 'Reproduzindo', 'Please correct all errors.': 'Por favor corrija todos os erros.', 'Please enter a First Name': 'Por favor insira um primeiro nome', 'Please enter a first name': 'Por favor insira um primeiro nome', 'Please enter a person': 'Insira uma pessoa', 'Please enter a site OR a location': 'Por favor digite um site ou um local', 'Please enter the first few letters of the Person/Group for the autocomplete.': 'Por favor digite as primeiras letras da Pessoa/Grupo para o autocompletar.', 'Please enter the recipient': 'Por favor digite o destinatário', 'Please fill this!': 'Por favor preencha isso!', 'Please provide the URL of the page you are referring to, a description of what you expected to happen & what actually happened. If a ticket was issued then please provide the Ticket ID.': 'Por favor forneça a URL da página a que você está se referindo, uma descrição do que você esperava que acontecesse e o que realmente aconteceu. Se um bilhete foi emitido, então por favor forneça o ID do bilhete.', 'Please provide the URL of the page you are referring to, a description of what you expected to happen & what actually happened.': 'Por favor forneça a URL da página a que você está se referindo, uma descrição do que você esperava que acontecesse e o que realmente aconteceu.', 'Please report here where you are:': 'Por favor informe aqui onde você está:', 'Please select another level': 'Por favor selecione outro nível', 'Please select': 'Por favor selecione', 'Please sign-up with your Cell Phone as this allows us to send you Text messages. Please include full Area code.': 'Por favor inscreva-se com seu celular, pois isso nos permite enviar mensagens de texto. Por favor inclua o código de área completo.', 'Please specify any problems and obstacles with the proper handling of the disease, in detail (in numbers, where appropriate). You may also add suggestions the situation could be improved.': 'Por favor especifique quaisquer problemas e obstáculos com a manipulação correcta da doença, em detalhes (em números, se for o caso). Pode também dar sugestões de como a situação pode ser melhorada.', 'Please use this field to record any additional information, including a history of the record if it is updated.': 'Por favor utilize esse campo para registrar quaisquer informações adicionais, incluindo um histórico do registro se ele estiver sendo atualizado.', 'Please use this field to record any additional information, including any Special Needs.': 'Por favor utilize esse campo para registrar quaisquer informações adicionais, incluindo quaisquer necessidades especiais.', 'Please use this field to record any additional information, such as Ushahidi instance IDs. 
Include a history of the record if it is updated.': 'Por favor utilize esse campo para registrar quaisquer informações adicionais, como IDs de instância Ushahidi. Inclua o histórico do registro se este for atualizado.', 'Pledge Support': 'Prometer Apoio', 'Point': 'Ponto', 'Poisoning': 'Envenenamento', 'Poisonous Gas': 'Gás venenoso', 'Police': 'Polícia', 'Pollution and other environmental': 'Poluição ambiental e outras', 'Polygon reference of the rating unit': 'Polígono de referência da unidade de classificação', 'Polygon': 'Polígono', 'Poor': 'Pobre', 'Population Statistic Details': 'Detalhes da Estatística de População', 'Population Statistic added': 'Estatística de População incluída', 'Population Statistic deleted': 'Estatística de População excluída', 'Population Statistic updated': 'Estatística de População atualizada', 'Population Statistics': 'Estatísticas de População', 'Population and number of households': 'População e número de domicílios', 'Population': 'População', 'Popup Fields': 'Campos do Pop-up', 'Popup Label': 'Rótulo do pop-up', 'Porridge': 'Mingau', 'Port Closure': 'Fechamento de Porto', 'Port': 'Porta', 'Portuguese (Brazil)': 'Português (Brasil)', 'Portuguese': 'Português', 'Position Catalog': 'Catálogo de posições', 'Position Details': 'Detalhes do Cargo', 'Position added': 'Cargo inserido', 'Position deleted': 'Cargo excluído', 'Position updated': 'Posição atualizada', 'Position': 'Posição', 'Positions': 'Cargos', 'Postcode': 'Código Postal', 'Poultry restocking, Rank': 'Reabastecimento de aves domésticas, Classificação', 'Poultry': 'Aves', 'Pounds': 'Libras', 'Power Failure': 'Falha de Energia', 'Powered by Sahana Eden': 'Desenvolvido pela Sahana Eden', 'Pre-cast connections': 'Conexões pré-moldadas', 'Preferred Name': 'Nome Preferido', 'Pregnant women': 'Mulheres grávidas', 'Preliminary': 'Preliminar', 'Presence Condition': 'Condição de Presença', 'Presence Log': 'Log de Presença', 'Presence': 'Presença', 'Previous': 'Anterior', 'Primary Name': 'Nome Principal', 'Primary Occupancy': 'Ocupação Principal', 'Priority from 1 to 9. 1 is most preferred.': 'Prioridade de 1 a 9. 
1 é o mais preferido.', 'Priority': 'Prioridade', 'Private': 'Privado', 'Problem Administration': 'Administração de Problemas', 'Problem Details': 'Detalhes do Problema', 'Problem Group': 'Grupo do Problema', 'Problem Title': 'Título do Problema', 'Problem added': 'Problema incluído', 'Problem connecting to twitter.com - please refresh': 'Problema ao conectar-se ao twitter.com, tente novamente', 'Problem deleted': 'Problema Excluído', 'Problem updated': 'Problema Atualizado', 'Problem': 'Problema', 'Problems': 'Problemas', 'Procedure': 'Procedimento', 'Process Received Shipment': 'Processar Embarque Recebido', 'Process Shipment to Send': 'Processar remessa a enviar', 'Profile': 'Perfil', 'Project Details': 'Detalhes do Projeto', 'Project Status': 'Status do Projeto', 'Project Tracking': 'Acompanhamento do Projeto', 'Project added': 'Projeto incluído', 'Project deleted': 'Projeto Excluído', 'Project has no Lat/Lon': 'Projeto não possui Latitude/Longitude', 'Project updated': 'Projeto atualizado', 'Project': 'Projeto', 'Projection Details': 'Detalhes da Projeção', 'Projection added': 'Projeção incluída', 'Projection deleted': 'Projeção excluída', 'Projection updated': 'Projeção atualizada', 'Projection': 'Projeção', 'Projections': 'Projeções', 'Projects': 'Projetos', 'Property reference in the council system': 'Referência de propriedade no sistema do conselho', 'Protected resource': 'Recurso protegido', 'Protection': 'Protecção', 'Provide Metadata for your media files': 'Fornecer Metadados para os seus ficheiros media', 'Provide an optional sketch of the entire building or damage points. Indicate damage points.': 'Forneça um esboço opcional de todo o edifício ou dos pontos de dano. Indique os pontos de dano.', 'Proxy-server': 'Servidor Proxy', 'Psychiatrics/Adult': 'Psiquiatria/Adulto', 'Psychiatrics/Pediatric': 'Psiquiatria/Pediátrica', 'Public Event': 'Evento público', 'Public and private transportation': 'Transporte Público e Privado', 'Public assembly': 'Assembléia Pública', 'Public': 'Público', 'Pull tickets from external feed': 'Puxar bilhetes de um feed externo', 'Purchase Date': 'Data de aquisição', 'Push tickets to external system': 'Enviar bilhetes para sistema externo', 'Pyroclastic Flow': 'Fluxo Piroclástico', 'Pyroclastic Surge': 'Onda Piroclástica', 'Python Serial module not available within the running Python - this needs installing to activate the Modem': 'Módulo Serial do Python não disponível no Python em execução - é preciso instalá-lo para ativar o Modem', 'Python needs the ReportLab module installed for PDF export': 'Python precisa do módulo ReportLab instalado para exportação em PDF', 'Quantity Committed': 'Quantidade Comprometida', 'Quantity Fulfilled': 'Quantidade Preenchida', 'Quantity in Transit': 'Quantidade em Trânsito', 'Quantity': 'Quantidade', 'Quarantine': 'Quarentena', 'Queries': 'Buscas', 'Query': 'Busca', 'Queryable?': 'Consultável?', 'RC frame with masonry infill': 'Estrutura de concreto armado com preenchimento de alvenaria', 'RECORD A': 'Registro A', 'RECORD B': 'Registro B', 'Race': 'Raça', 'Radio Callsign': 'Indicativo de Chamada de Rádio', 'Radiological Hazard': 'Risco Radiológico', 'Radiology': 'Radiologia', 'Railway Accident': 'Acidente Ferroviário', 'Railway Hijacking': 'Sequestro Ferroviário', 'Rain Fall': 'Queda de Chuva', 'Rapid Assessment Details': 'Detalhes da Avaliação Rápida', 'Rapid Assessment added': 'Avaliação Rápida incluída', 'Rapid Assessment deleted': 'Avaliação Rápida excluída', 'Rapid Assessment updated': 'Avaliação Rápida atualizada', 'Rapid Assessment': 
'Avaliação Rápida', 'Rapid Assessments & Flexible Impact Assessments': 'Avaliações Rápidas & Avaliações Flexíveis de Impacto', 'Rapid Assessments': 'Avaliações Rápidas', 'Rapid Close Lead': 'Fechamento Lead rápido', 'Rapid Data Entry': 'Entrada de dados rápida', 'Rating Scale': 'Escala de avaliação', 'Raw Database access': 'Acesso bruto à Base de dados', 'Read-Only': 'Somente para leitura', 'Read-only': 'Somente para leitura', 'Receive Items': 'Receber itens', 'Receive New Shipment': 'Receber Novos Embarques', 'Receive Shipment': 'Receber carregamento', 'Receive this shipment?': 'Receber esse embarque?', 'Receive': 'Receber', 'Received By Person': 'Recebido Por Pessoa', 'Received By': 'Recebido Por', 'Received Item Details': 'Detalhes do item recebido', 'Received Item deleted': 'Item recebido excluído', 'Received Item updated': 'Item recebido atualizado', 'Received Shipment Details': 'Detalhes do Carregamento Recebido', 'Received Shipment canceled and items removed from Inventory': 'Carregamento recebido cancelado e itens removidos do inventário', 'Received Shipment canceled': 'Remessa de produtos cancelada', 'Received Shipment updated': 'Carregamento Recebido Atualizado', 'Received Shipments': 'Carregamentos Recebidos', 'Received': 'Recebido', 'Receiving and Sending Items': 'Receber e enviar Itens', 'Recipient': 'Destinatário', 'Recipients': 'Destinatários', 'Recommendations for Repair and Reconstruction or Demolition': 'Recomendações para reparo e reconstrução ou demolição', 'Record Details': 'Detalhes do Registro', 'Record Saved': 'Registro Gravado', 'Record added': 'Registro incluído', 'Record any restriction on use or entry': 'Registre qualquer restrição de uso ou entrada', 'Record deleted': 'Registro excluído', 'Record last updated': 'Último registro atualizado', 'Record not found!': 'Registro não encontrado!', 'Record not found': 'Registro não encontrado', 'Record updated': 'Registro atualizado', 'Record': 'Registro', 'Recording and Assigning Assets': 'Registro e Designação de Ativos', 'Records': 'Registros', 'Recovery Request added': 'Pedido de recuperação adicionado', 'Recovery Request deleted': 'Pedido de recuperação apagado', 'Recovery Request updated': 'Pedido de recuperação atualizado', 'Recovery Request': 'Pedido de recuperação', 'Recovery Requests': 'Pedidos de recuperação', 'Recovery': 'Recuperação', 'Recruitment': 'Recrutamento', 'Recurring Cost': 'Custo recorrente', 'Recurring cost': 'Custo recorrente', 'Recurring costs': 'Custos recorrentes', 'Recurring': 'Recorrente', 'Red Cross / Red Crescent': 'Cruz Vermelha / Crescente Vermelho', 'Red': 'Vermelho', 'Reference Document': 'Documento de referência', 'Refresh Rate (seconds)': 'Taxa de Atualização (Segundos)', 'Region Location': 'Localização da região', 'Regional': 'Regional', 'Regions': 'Regiões', 'Register Person into this Camp': 'Registrar Pessoa neste Acampamento', 'Register Person into this Shelter': 'Registrar Pessoa neste Abrigo', 'Register Person': 'Registrar Pessoa', 'Register them as a volunteer': 'Registrá-los como voluntários', 'Register': 'Registrar', 'Registered People': 'Pessoas Registradas', 'Registered users can': 'Os usuários registrados podem', 'Registration Details': 'Detalhes da Inscrição', 'Registration added': 'Inscrição adicionada', 'Registration entry deleted': 'Inscrição excluída', 'Registration is still pending approval from Approver (%s) - please wait until confirmation received.': 'Registro ainda está pendente de aprovação do Aprovador (%s) - Por favor, aguarde até que a confirmação seja 
recebida.', 'Registration updated': 'Inscrição atualizada', 'Registration': 'Inscrição', 'Rehabilitation/Long Term Care': 'Reabilitação/Cuidados de Longo Prazo', 'Reinforced masonry': 'Alvenaria reforçada', 'Rejected': 'Rejeitado', 'Relief Team': 'Equipe de socorro', 'Relief': 'Alívio', 'Religion': 'Religião', 'Religious Leader': 'Líder religioso', 'Religious': 'Religioso', 'Relocate as instructed in the <instruction>': 'Relocalizar conforme instruído na <instruction>', 'Remove Asset from this event': 'Remover ativo deste evento', 'Remove Asset from this scenario': 'Remover ativo deste cenário', 'Remove Facility from this event': 'Remover recurso deste evento', 'Remove Facility from this scenario': 'Remover recurso deste cenário', 'Remove Human Resource from this event': 'Remover recursos humanos a partir deste evento', 'Remove Human Resource from this scenario': 'Remover recursos humanos a partir deste cenário', 'Remove Item from Inventory': 'Remover Item do Inventário', 'Remove Map Configuration from this event': 'Remover Mapa de configuração a partir deste evento', 'Remove Map Configuration from this scenario': 'Remover Mapa de configuração a partir deste cenário', 'Remove Person from Group': 'Remover Pessoa do Grupo', 'Remove Person from Team': 'Remover Pessoa da Equipe', 'Remove this asset from this event': 'Remover este recurso a partir deste evento', 'Remove this asset from this scenario': 'Remover este recurso deste cenário', 'Remove': 'Remover', 'Removed from Group': 'Removido do Grupo', 'Removed from Team': 'Removido da Equipe', 'Repair': 'Reparo', 'Repaired': 'Reparado', 'Repeat your password': 'Repita sua senha', 'Replace if Master': 'Substituir se Principal', 'Replace if Newer': 'Substituir se o Mais Recente', 'Replace': 'Trocar', 'Report Another Assessment...': 'Relatar Outra Avaliação...', 'Report Details': 'Detalhes do Relatório', 'Report Resource': 'Reportar Recurso', 'Report Types Include': 'Tipos de relatório incluem', 'Report added': 'Relatório incluído', 'Report deleted': 'Relatório removido', 'Report my location': 'Relate meu local', 'Report the contributing factors for the current EMS status.': 'Reportar os factores que contribuem para a situação EMS actual.', 'Report the contributing factors for the current OR status.': 'Reportar os fatores que contribuem para o status atual das Salas de Cirurgia.', 'Report them as found': 'Reportar como encontrados', 'Report them missing': 'Reportar como perdidos', 'Report updated': 'Relatório atualizado', 'Report': 'Relatório', 'Reporter Name': 'Nome do Relator', 'Reporter': 'Relator', 'Reporting on the projects in the region': 'Relatórios sobre os projetos na região', 'Reports': 'Relatórios', 'Request Added': 'Pedido Incluído', 'Request Canceled': 'Pedido Cancelado', 'Request Details': 'Detalhes do Pedido', 'Request From': 'Pedido De', 'Request Item Details': 'Detalhes do item de pedido', 'Request Item added': 'Item incluído no pedido', 'Request Item deleted': 'Item de pedido excluído', 'Request Item from Available Inventory': 'Pedido de Item do Inventário Disponível', 'Request Item updated': 'Item de pedido actualizado', 'Request Item': 'Item de pedido', 'Request Items': 'Itens de pedido', 'Request Status': 'Status do Pedido', 'Request Type': 'Tipo de Pedido', 'Request Updated': 'Solicitação atualizada', 'Request added': 'Pedido adicionado', 'Request deleted': 'Solicitação excluída', 'Request for Role Upgrade': 'Pedido de upgrade de função', 'Request updated': 'Pedido actualizado', 'Request': 'Pedido', 'Request, Response & Session': 'Pedido, Resposta & Sessão', 
'Requested By Facility': 'Solicitado Pela Instalação', 'Requested By Site': 'Solicitado Por Site', 'Requested By': 'Solicitado Por', 'Requested From': 'Solicitado a Partir de', 'Requested Items': 'Itens solicitados', 'Requested by': 'Solicitado Por', 'Requested on': 'Solicitado em', 'Requested': 'Solicitado', 'Requester': 'Solicitante', 'Requests Management': 'Gerenciamento de Pedidos', 'Requests': 'Pedidos', 'Requires Login!': 'É necessário fazer login!', 'Rescue and recovery': 'Resgate e recuperação', 'Reset Password': 'Restabelecer Senha', 'Reset': 'Restaurar', 'Resolve Conflict': 'Resolver Conflito', 'Resolve link brings up a new screen which helps to resolve these duplicate records and update the database.': 'O link Resolver abre uma nova tela que ajuda a resolver esses registros duplicados e atualizar o banco de dados.', 'Resolve': 'Resolver', 'Resource Details': 'Detalhes do recurso', 'Resource added': 'Recurso incluído', 'Resource deleted': 'Recurso Excluído', 'Resource updated': 'Recurso atualizado', 'Resource': 'Recurso', 'Resources': 'Recursos', 'Respiratory Infections': 'Infecções respiratórias', 'Response': 'Resposta', 'Restricted Access': 'Acesso Restrito', 'Restricted Use': 'Uso restrito', 'Results': 'Resultados', 'Retail Crime': 'Crime no varejo', 'Retrieve Password': 'Recuperar Senha', 'Return to Request': 'Retornar ao pedido', 'Return': 'Retorno', 'Returned From': 'Retornado de', 'Returned Status': 'Status de Retorno', 'Returned': 'Retornado', 'Review Incoming Shipment to Receive': 'Revisão da Remessa de Entrada para Receber', 'Rice': 'Arroz', 'Riot': 'Motim', 'River Details': 'Detalhes do Rio', 'River added': 'Rio adicionado', 'River deleted': 'Rio excluído', 'River updated': 'Rio atualizado', 'River': 'Rio', 'Rivers': 'Rios', 'Road Accident': 'Acidente na rua/estrada', 'Road Closed': 'Rua/Estrada fechada', 'Road Conditions': 'Condições da Estrada', 'Road Delay': 'Atraso de Estrada', 'Road Hijacking': 'Sequestro de Estrada', 'Road Usage Condition': 'Condição de Uso de Estrada', 'Role Details': 'Detalhes da Função', 'Role Required': 'Função requerida', 'Role Updated': 'Função Atualizada', 'Role added': 'Função incluída', 'Role deleted': 'Função excluída', 'Role updated': 'Função atualizada', 'Role': 'Função', 'Role-based': 'Baseado em funções', 'Roles Permitted': 'Funções Permitidas', 'Roles': 'Funções', 'Roof tile': 'Telha', 'Roofs, floors (vertical load)': 'Telhados, pisos (carga vertical)', 'Room Details': 'Detalhes da sala', 'Room added': 'Sala incluída', 'Room deleted': 'Sala excluída', 'Room updated': 'Sala atualizada', 'Room': 'Sala', 'Rooms': 'Salas', 'Roster': 'Lista', 'Row Choices (One Per Line)': 'Opções de Linha (Uma por linha)', 'Rows in table': 'Linhas na tabela', 'Rows selected': 'Linhas Selecionadas', 'Run Functional Tests': 'Executar testes funcionais', 'Run Interval': 'Intervalo de execução', 'Running Cost': 'Custo corrente', 'Safe environment for vulnerable groups': 'Ambiente seguro para grupos vulneráveis', 'Safety Assessment Form': 'Formulário de avaliação de segurança', 'Safety of children and women affected by disaster?': 'Segurança das crianças e mulheres afetadas pela catástrofe?', 'Sahana Administrator': 'Administrador Sahana', 'Sahana Blue': 'Sahana Azul', 'Sahana Community Chat': 'Bate-papo da Comunidade Sahana', 'Sahana Eden <=> Other': 'Sahana Eden <=> Outros', 'Sahana Eden <=> Sahana Eden': 'Sahana Eden <=> Sahana Eden', 'Sahana Eden Humanitarian Management Platform': 'Plataforma de Gerenciamento Humanitário Sahana 
Eden', 'Sahana Eden Website': 'Site Sahana Eden', 'Sahana Green': 'Sahana Verde', 'Sahana access granted': 'Acesso Sahana concedido', 'Salted Fish': 'Peixe Salgado', 'Sanitation problems': 'Problemas de saneamento', 'Satellite Office': 'Escritório Satélite', 'Satellite': 'Satélite', 'Saturday': 'sábado', 'Save': 'Salvar', 'Saved.': 'Salvo.', 'Saving...': 'Salvando...', 'Scale of Results': 'Nível de Resultados', 'Scenario Details': 'Detalhes do Cenário', 'Scenario added': 'Cenário incluído', 'Scenario deleted': 'Cenário excluído', 'Scenario updated': 'Cenário atualizado', 'Scenario': 'Cenário', 'Scenarios': 'Cenários', 'Schedule': 'Horário', 'Schema': 'Esquema', 'School Closure': 'Encerramento Escolar', 'School Lockdown': 'Bloqueio escolar', 'School Teacher': 'Professor de escola', 'School activities': 'Actividades escolares', 'School assistance': 'Assistência escolar', 'School attendance': 'Presença escolar', 'School destroyed': 'Escola Destruída', 'School heavily damaged': 'Escola fortemente danificada', 'School tents received': 'Tendas da escola recebidas', 'School tents, source': 'Tendas de escola, origem', 'School used for other purpose': 'Escola utilizada para outros fins', 'School': 'Escola', 'School/studying': 'Escola/estudando', 'Schools': 'Escolas', 'Search Activities': 'Procurar Atividades', 'Search Activity Report': 'Procurar Relatórios de Atividades', 'Search Addresses': 'Procurar Endereços', 'Search Alternative Items': 'Procurar itens alternativos', 'Search Assessment Summaries': 'Procurar Resumos de Avaliação', 'Search Assessments': 'Procurar Avaliações', 'Search Asset Assignments': 'Procurar Designações de Ativos', 'Search Asset Log': 'Procurar log de ativo', 'Search Assets': 'Procurar Recursos', 'Search Baseline Type': 'Procurar Tipo de Base', 'Search Baselines': 'Procurar Bases', 'Search Brands': 'Procurar Marcas', 'Search Budgets': 'Procurar Orçamentos', 'Search Bundles': 'Procurar Pacotes', 'Search Camp Services': 'Procurar Serviços de Acampamento', 'Search Camp Types': 'Procurar Tipos De Acampamento', 'Search Camps': 'Procurar acampamentos', 'Search Catalog Items': 'Procurar Itens de Catálogo', 'Search Catalogs': 'Procurar nos Catálogos', 'Search Certificates': 'Procurar Certificados', 'Search Certifications': 'Procurar Certificações', 'Search Checklists': 'Procurar Listas de Verificação', 'Search Cluster Subsectors': 'Procurar Subsetores de Cluster', 'Search Clusters': 'Procurar Clusters', 'Search Commitment Items': 'Procurar Itens de Compromisso', 'Search Commitments': 'Procurar Compromissos', 'Search Competencies': 'Procurar Competências', 'Search Competency Ratings': 'Procurar Índices de Competência', 'Search Contact Information': 'Procurar informações de contato', 'Search Contacts': 'Buscar contatos', 'Search Course Certificates': 'Procurar Certificados de Curso', 'Search Courses': 'Procurar Cursos', 'Search Credentials': 'Procurar Credenciais', 'Search Documents': 'Pesquisar documentos', 'Search Donors': 'Procurar Doadores', 'Search Entries': 'Pesquisar Entradas', 'Search Events': 'Pesquisar Eventos', 'Search Facilities': 'Pesquisar Instalações', 'Search Feature Layers': 'Procurar Camadas de Componentes', 'Search Flood Reports': 'Pesquisar relatórios de inundação', 'Search Groups': 'Buscar Grupos', 'Search Human Resources': 'Procurar Recursos Humanos', 'Search Identity': 'Buscar Identidade', 'Search Images': 'Procurar Imagens', 'Search Impact Type': 'Procurar Tipo de Impacto', 'Search Impacts': 'Procurar Impactos', 'Search Incident 
Reports': 'Procurar Relatórios de Incidentes', 'Search Inventory Items': 'Procurar Entradas De Inventário', 'Search Inventory items': 'Procurar Entradas De Inventário', 'Search Item Categories': 'Buscar categorias de Item', 'Search Item Packs': 'Buscar pocotes de itens', 'Search Items': 'Buscar Itens', 'Search Job Roles': 'Pesquise papéis de trabalho', 'Search Keys': 'Procurar chaves', 'Search Kits': 'Procurar kits', 'Search Layers': 'Procurar camadas', 'Search Level 1 Assessments': 'Procurar Avaliações Nível 1', 'Search Level 2 Assessments': 'Procurar Avaliações Nível 2', 'Search Locations': 'Procurar Localidades', 'Search Log Entry': 'Procura de entrada de Log', 'Search Map Configurations': 'Pesquise mapa de configurações.', 'Search Markers': 'Marcadores De procura', 'Search Members': 'Procurar Membro', 'Search Membership': 'Procurar filiação', 'Search Memberships': 'Pesquisar Associações', 'Search Missions': 'Procurar Missões', 'Search Need Type': 'Procura Precisa De Tipo', 'Search Needs': 'Procura precisa', 'Search Notes': 'Notes procura', 'Search Offices': 'Escritórios de procura', 'Search Organizations': 'Pesquisar Organizações', 'Search Peer': 'PROCURA Par', 'Search Personal Effects': 'Procura objetos pessoais', 'Search Persons': 'Buscar Membros', 'Search Photos': 'Procura Fotos', 'Search Population Statistics': 'Procurar Estatística de População', 'Search Positions': 'Procura de Posições', 'Search Problems': 'Procura de Problemas', 'Search Projections': 'Projeções de procura', 'Search Projects': 'Procura de Projetos', 'Search Rapid Assessments': 'Procura de Avaliações Rápidas', 'Search Received Items': 'Procura de Itens Recebidos', 'Search Received Shipments': 'Embarques de procura Recebidos', 'Search Records': 'registros de procura', 'Search Registations': 'Registations procura', 'Search Registration Request': 'Pedido de registro de procura', 'Search Report': 'Procurar Relatório', 'Search Reports': 'Procurar Relatórios', 'Search Request Items': 'Pedido de procura de Itens', 'Search Request': 'pedido de pesquisa', 'Search Requested Items': 'Procura de itens solicitados', 'Search Requests': 'Procura de solicitações', 'Search Resources': 'Pesquisa de recursos', 'Search Rivers': 'Rios procura', 'Search Roles': 'Pesquisa de papéis', 'Search Rooms': 'Procurar Salas', 'Search Scenarios': 'Procurar cenários', 'Search Sections': 'As Seções de procura', 'Search Sectors': 'Procurar Setores', 'Search Sent Items': 'Procurar Itens Enviados', 'Search Sent Shipments': 'Procurar Despachos Enviados', 'Search Service Profiles': 'Serviço de procura Perfis', 'Search Settings': 'Definições de Pesquisa', 'Search Shelter Services': 'Procura Abrigo de serviços', 'Search Shelter Types': 'Procura tipos de Abrigo', 'Search Shelters': 'Procurar Abrigos', 'Search Skill Equivalences': 'Procurar equivalencias de habilidades', 'Search Skill Provisions': 'Procurar Disposições de habilidade', 'Search Skill Types': 'Pesquisar Tipos de Habilidades', 'Search Skills': 'Pesquisar Habilidades', 'Search Solutions': 'Pesquisar Soluções', 'Search Staff Types': 'Busca de tipo de pessoal', 'Search Staff or Volunteer': 'Procurar Funcionário ou Voluntário', 'Search Staff': 'Busca de pessoal', 'Search Status': 'Busca de status', 'Search Subscriptions': 'Busca de assinaturas', 'Search Subsectors': 'Buscar subsetores', 'Search Support Requests': 'Pedidos de suporte a pesquisa', 'Search Tasks': 'Tarefa de Pesquisa', 'Search Teams': 'Times de pesquisa', 'Search Themes': 'Temas de pesquisa', 'Search Tickets': 'Buscar Bilhetes', 
'Search Tracks': 'Procurar Trilhas', 'Search Trainings': 'Buscar Treinamentos', 'Search Twitter Tags': 'Procurar Twitter Tags', 'Search Units': 'Procura Unidades', 'Search Users': 'Procurar Usuários', 'Search Volunteer Availability': 'Buscar Disponibilidade para Voluntáriado', 'Search Volunteers': 'Procura Voluntários', 'Search Warehouses': 'procura Warehouses', 'Search and Edit Group': 'Procurar e editar GRUPO', 'Search and Edit Individual': 'Procurar e Editar Individual', 'Search for Staff or Volunteers': 'Pesquise por funcionários ou voluntários', 'Search for a Location by name, including local names.': 'Pesquisar local por nome, incluindo nomes locais.', 'Search for a Person': 'Procurar Pessoa', 'Search for a Project': 'Procurar Projecto', 'Search for a shipment by looking for text in any field.': 'Procurar carga fazendo uma pesquisa de texto em qualquer campo.', 'Search for a shipment received between these dates': 'Procurar carga recebida entre estas datas', 'Search for an Organization by name or acronym': 'Procurar por uma Organização por nome ou iniciais', 'Search for an Organization by name or acronym.': 'Procurar por uma organização por nome ou iniciais.', 'Search for an asset by text.': 'Pesquisar um recurso por texto.', 'Search for an item by category.': 'Procurar por categoria.', 'Search for an item by text.': 'Procurar por texto.', 'Search for asset by country.': 'Procurar bens por país.', 'Search for office by country.': 'Procurar escritórios por país.', 'Search for office by organization.': 'Procurar escritórios por organização.', 'Search for office by text.': 'Procura por texto do gabinete.', 'Search for warehouse by country.': 'Pesquise por depósito por país.', 'Search for warehouse by organization.': 'Pesquise por depósito por organização.', 'Search for warehouse by text.': 'Pesquise por depósito via campo-texto.', 'Search here for a person record in order to:': 'Buscar aqui por um registro de pessoa a fim de:', 'Search messages': 'Mensagens de Procura', 'Search': 'Pesquisar', 'Searching for different groups and individuals': 'Procurar diferentes grupos e indivíduos', 'Secondary Server (Optional)': 'Servidor secundário (opcional)', 'Seconds must be a number between 0 and 60': 'Segundos deve ser um número entre 0 e 60', 'Section Details': 'Seção Detalhes', 'Section deleted': 'Seção excluído', 'Section updated': 'Seção atualizada', 'Sections': 'Seções', 'Sector Details': 'Detalhes do Setor', 'Sector added': 'Sector incluído', 'Sector deleted': 'Sector apagado', 'Sector updated': 'Setor atualizado', 'Sector': 'setor', 'Sector(s)': 'Setor(es)', 'Sectors': 'Setores', 'Security Status': 'Status de Segurança', 'Security problems': 'Problemas de Segurança', 'See All Entries': 'Ver todas as entradas', 'See all': 'Ver tudo', 'See unassigned recovery requests': 'Consulte Pedidos de recuperação designado', 'Seen': 'Visto', 'Select Items from the Request': 'Selecionar itens do pedido', 'Select Items from this Inventory': 'Selecionar itens a partir deste Inventário', 'Select Organization': 'Selecionar Organização', 'Select a location': 'Selecionar um local', 'Select a question from the list': 'Selecione uma pergunta a partir da lista', 'Select a range for the number of total beds': 'Selecione um intervalo para o número de camas total', 'Select all that apply': 'Selecione todas as que se applicam', 'Select an Organization to see a list of offices': 'Selecione uma organização para ver uma lista de escritórios', 'Select the overlays for Assessments and Activities relating to each Need to 
identify the gap.': 'Selecione as sobreposições de avaliação e actividades relacionadas com cada necessidade para identificar as lacunas.', 'Select the person assigned to this role for this project.': 'Selecione a pessoa designada para essa função neste projeto.', 'Select to show this configuration in the Regions menu.': 'Selecione para mostrar essa configuração no menu regiões.', 'Select': 'select', 'Selects whether to use a Modem, Tropo or other Gateway for sending out SMS': 'Selecione se vau utilizar um Modem, Tropo ou outro Gateway para enviar SMS', 'Send Alerts using Email &/or SMS': 'Envio de alertas usando e-mail e/ou SMS', 'Send Commitment as Shipment': 'Enviar compromisso como carregamento', 'Send New Shipment': 'Enviar nova remessa', 'Send Notification': 'Enviar notificação', 'Send Shipment': 'Enviar Carregamento', 'Send a message to this person': 'Enviar uma mensagem para esta pessoa', 'Send a message to this team': 'Enviar uma mensagem para essa equipe', 'Send from %s': 'Enviar de %s', 'Send message': 'Enviar mensagem', 'Send new message': 'Enviar nova mensagem', 'Send': 'Envie', 'Sends & Receives Alerts via Email & SMS': 'Envia & Recebe Alertas via E-Mail & SMS', 'Senior (50+)': 'Sênior (50+)', 'Sent By Person': 'Enviado Por Pessoa', 'Sent By': 'Enviado Por', 'Sent Item Details': 'Detalhes do Item enviado', 'Sent Item deleted': 'Enviado Item excluído', 'Sent Item updated': 'Enviado Item atualizado', 'Sent Shipment Details': 'Enviado Detalhes de Embarque', 'Sent Shipment canceled and items returned to Inventory': 'Enviado Carregamento cancelado e itens retornado ao Inventário', 'Sent Shipment canceled': 'Enviado Carregamento cancelado', 'Sent Shipment updated': 'Enviado Embarque atualizado', 'Sent Shipments': 'Remessas Enviadas', 'Sent': 'Enviadas', 'Separated children, caregiving arrangements': 'Crianças separados, disposições caregiving', 'Serial Number': 'Numero de série', 'Series': 'serie', 'Server': 'servidor', 'Service Catalog': 'Catálogo de Serviços', 'Service or Facility': 'Serviço ou facilidade', 'Service profile added': 'Perfil de serviço adicionado', 'Service profile deleted': 'Perfil de serviço Excluído', 'Service profile updated': 'Perfil de serviço atualizado', 'Service': 'serviço', 'Services Available': 'Serviços Disponíveis', 'Services': 'Serviços', 'Set Base Site': 'Definir base de dados do site', 'Set By': 'Definido por', 'Set True to allow editing this level of the location hierarchy by users who are not MapAdmins.': 'Configure como True para permitir que este nível da hierarquia do local possa ser editado por usuários que não sejam administradores.', 'Setting Details': 'Detalhes de ajuste', 'Setting added': 'Configuração adicionada', 'Setting deleted': 'Configuração Excluída', 'Setting updated': 'Configuração atualizada', 'Settings updated': 'Ajustes atualizados', 'Settings were reset because authenticating with Twitter failed': 'As configurações foram redefinidas porque a autenticação com Twitter falhou', 'Settings which can be configured through the web interface are available here.': 'As configurações que podem ser definidas através da interface da web estão disponíveis aqui.', 'Settings': 'Ajustes', 'Severe': 'Severo', 'Severity': 'Gravidade', 'Share a common Marker (unless over-ridden at the Feature level)': 'Compartilhar um marcador comum (a não ser que abaixo-assinado ao nível de Componente)', 'Shelter & Essential NFIs': 'Abrigo & NFIs Essenciais', 'Shelter Details': 'Detalhes de Abrigo', 'Shelter Name': 'Nome de Abrigo', 'Shelter Registry': 'Registro 
de Abrigo', 'Shelter Service Details': 'Detalhes do serviço de abrigo', 'Shelter Service added': 'Serviço de Abrigo incluído', 'Shelter Service deleted': 'Serviço de Abrigo excluído', 'Shelter Service updated': 'Atualização de serviços de abrigo', 'Shelter Service': 'Serviço de Abrigo', 'Shelter Services': 'Serviços de abrigo', 'Shelter Type Details': 'Detalhes do tiipo de abrigo', 'Shelter Type added': 'Tipo de abrigo incluído', 'Shelter Type deleted': 'Tipo de abrigo excluído', 'Shelter Type updated': 'Abrigos Tipo De atualização', 'Shelter Type': 'Tipo de abrigo', 'Shelter Types and Services': 'Abrigo Tipos e serviços', 'Shelter Types': 'Tipos De abrigo', 'Shelter added': 'Abrigo incluído', 'Shelter deleted': 'Abrigo excluído', 'Shelter updated': 'Abrigo atualizado', 'Shelter': 'Abrigo', 'Shelter/NFI Assistance': 'Abrigo/ Assistência NFI', 'Shelters': 'Abrigos', 'Shipment Created': 'Embarque Criado', 'Shipment Items received by Inventory': 'Itens de Remessa recebidos pelo Inventário', 'Shipment Items sent from Inventory': 'Itens de Remessa enviados pelo Inventário', 'Shipment Items': 'Itens de Carregamento', 'Shipment to Send': 'Carga para Enviar', 'Shipments To': 'Remessas Para', 'Shipments': 'Remessas', 'Shooting': 'Tiroteio', 'Short Assessment': 'Curta Avaliação', 'Short Description': 'Breve Descrição', 'Show Checklist': 'Mostrar Lista De Verificação', 'Show Details': 'Mostrar detalhes', 'Show Map': 'Mostrar Mapa', 'Show Region in Menu?': 'Mostrar Região no Menu?', 'Show on Map': 'Mostrar no mapa', 'Show on map': 'Mostrar no mapa', 'Sign-up as a volunteer': 'Inscrever-se como um voluntário', 'Sign-up for Account': 'Inscrever-se para conta', 'Sign-up succesful - you should hear from us soon!': 'Sua inscriçao foi feita com sucesso - aguarde notícias em breve!', 'Sindhi': 'Sindi', 'Site Administration': 'Administração do site', 'Site or Location': 'Sítio ou Local', 'Site': 'site', 'Sites': 'sites', 'Situation Awareness & Geospatial Analysis': 'Situação Reconhecimento & Geoespaciais Análise', 'Situation': 'Situação', 'Sketch': 'Esboço', 'Skill Catalog': 'Catálogo de Conhecimentos', 'Skill Details': 'Detalhes das habilidades', 'Skill Equivalence Details': 'Detalhes da Equivalência de Habilidade', 'Skill Equivalence added': 'Equivalência de Habilidade incluída', 'Skill Equivalence deleted': 'Equivalência de Habilidade excluída', 'Skill Equivalence updated': 'Equivalência de Habilidade atualizada', 'Skill Equivalence': 'Equivalência de Conhecimentos', 'Skill Equivalences': 'Equivalências de habilidade', 'Skill Provision Catalog': 'Catálogo de habilidades disponível', 'Skill Provision Details': 'Detalhes de habilidades disponível', 'Skill Provision added': 'Provisão de Habilidade incluída', 'Skill Provision deleted': 'Catalogo de habilidades excluído', 'Skill Provision updated': 'Catálogo de habilidades atualizado', 'Skill Provision': 'Provisão de Habilidade', 'Skill Provisions': 'Habilidades disponíveis', 'Skill Status': 'Status da Habilidade', 'Skill TYpe': 'Tipo de habilidade', 'Skill Type Catalog': 'Catálogo de tipos de habilidades', 'Skill Type Details': 'Detalhes do tipo de habilidade', 'Skill Type added': 'Tipo de habilidade incluído', 'Skill Type deleted': 'Tipo de habilidade excluído', 'Skill Type updated': 'Tipo de habilidade atualizado', 'Skill Types': 'Tipos de habilidade', 'Skill added': 'Habilidade incluída', 'Skill deleted': 'Habilidade Excluída', 'Skill updated': 'Habilidade ATUALIZADA', 'Skill': 'QUALIFICAÇÃO', 'Skill/Training': 'Habilidades/Treinamento', 'Skills Catalog': 
'Catálogo de habilidades', 'Skills Management': 'Gerenciamento das Habilidades', 'Skills': 'Habilidades', 'Skype ID': 'ID DO Skype', 'Slightly Damaged': 'Ligeiramente Danificado', 'Slope failure, debris': 'falha de inclinação, destroços', 'Small Trade': 'Pequeno Comércio', 'Smoke': 'Fumaça', 'Snapshot Report': 'Relatório de snapshot', 'Snapshot': 'snapshot', 'Snow Fall': 'Queda de neve , nevasca', 'Snow Squall': 'Rajada de neve', 'Soil bulging, liquefaction': 'abaulamento do solo, liquefação', 'Solid waste': 'Resíduos sólidos', 'Solution Details': 'Detalhes da Solução', 'Solution Item': 'Item de Solução', 'Solution added': 'Solução adicionada', 'Solution deleted': 'Solução excluída', 'Solution updated': 'Solução atualizada', 'Solution': 'Solução', 'Solutions': 'Soluções', 'Some': 'Algum', 'Sorry that location appears to be outside the area of the Parent.': 'Desculpe ! Essa localização está fora da área do Pai.', 'Sorry that location appears to be outside the area supported by this deployment.': 'Desculpe ! Essa localização parece estar fora da área suportada por esta implementação.', 'Sorry, I could not understand your request': 'Desculpe, eu não pude entender o seu pedido', 'Sorry, only users with the MapAdmin role are allowed to create location groups.': 'Desculpe, apenas usuários com o perfil MapAdmin tem permissão para criar locais dos grupos.', 'Sorry, only users with the MapAdmin role are allowed to edit these locations': 'Desculpe, apenas usuários com o perfil MapAdmin tem permissão para editar estes locais', 'Sorry, something went wrong.': 'Desculpe, algo deu errado.', 'Sorry, that page is forbidden for some reason.': 'Desculpe ! Esta página tem acesso restrito por alguma razão.', 'Sorry, that service is temporary unavailable.': 'Desculpe ! Este serviço está indisponível temporariamente.', 'Sorry, there are no addresses to display': 'Desculpe ! Não há endereços para visualizar.', 'Source ID': 'ID de origem', 'Source Time': 'Origem do tempo', 'Source': 'source', 'Sources of income': 'Fontes de rendimento', 'Space Debris': 'Destroços Espaciais', 'Spanish': 'espanhol', 'Special Ice': 'Gelo Especial', 'Special Marine': 'Marinha especial', 'Specialized Hospital': 'Hospital especializado.', 'Specific Area (e.g. 
Building/Room) within the Location that this Person/Group is seen.': 'Área específica (exemplo: edifício/quarto) com a localização de onde essa pessoa/grupo é visto.', 'Specific locations need to have a parent of level': 'Locais específicos precisam ter um nível paterno.', 'Specify a descriptive title for the image.': 'Especifique um título descritivo para a imagem.', 'Specify the bed type of this unit.': 'Especifique o tipo de cama dessa unidade.', 'Specify the number of available sets': 'Especificar o número de conjuntos disponíveis', 'Specify the number of available units (adult doses)': 'Especifique o número de unidades disponíveis (doses para adultos)', 'Specify the number of available units (litres) of Ringer-Lactate or equivalent solutions': 'Especificar o número de unidades disponíveis (litros) de Ringer-Lactato ou soluções equivalentes', 'Specify the number of sets needed per 24h': 'Especificar o número de conjuntos necessários por 24h', 'Specify the number of units (adult doses) needed per 24h': 'Especificar o número de unidades (doses para adultos) necessário por 24h', 'Specify the number of units (litres) of Ringer-Lactate or equivalent solutions needed per 24h': 'Especificar o número de unidades (litros) de Ringer-Lactato ou soluções equivalentes necessárias para 24h', 'Spherical Mercator?': 'Mapa Mercator Esférico?', 'Spreadsheet Importer': 'PLANILHA IMPORTADOR', 'Spreadsheet uploaded': 'Planilha transferido por UPLOAD', 'Spring': 'Primavera', 'Squall': 'Rajada', 'Staff & Volunteers': 'Colaboradores & Voluntários', 'Staff 2': 'Equipe 2', 'Staff Details': 'Equipe Detalhes', 'Staff ID': 'ID da equipe', 'Staff List': 'Lista de pessoal', 'Staff Member Details': 'Detalhes de membro da equipe', 'Staff Members': 'Membros da equipe', 'Staff Record': 'Registro de pessoal', 'Staff Type Details': 'Equipe Tipo Detalhes', 'Staff Type added': 'Equipe tipo incluído', 'Staff Type deleted': 'Tipo De equipe excluído', 'Staff Type updated': 'Equipe Tipo De atualização', 'Staff Types': 'Tipos de equipe', 'Staff added': 'Equipe incluída', 'Staff and Volunteers': 'Funcionários e Voluntários', 'Staff deleted': 'Equipe excluída', 'Staff member added': 'Membro da equipe incluído', 'Staff member updated': 'Membro da equipe atualizado', 'Staff present and caring for residents': 'Equipe presente e cuidando de moradores', 'Staff updated': 'Equipe atualizado', 'Staff': 'Equipe', 'Staff2': 'staff2', 'Staffing': 'Equipe', 'Stairs': 'Escadas', 'Start Date': 'Data do início', 'Start date': 'Data Inicial', 'Start of Period': 'Início do Período', 'State': 'Status', 'Stationery': 'Papel de Carta', 'Status Report': 'Relatório de status', 'Status Updated': 'Status atualizado', 'Status added': 'Estado adicionado', 'Status deleted': 'Estado excluído', 'Status of clinical operation of the facility.': 'Estado da operação clínica da instalação.', 'Status of general operation of the facility.': 'Estado da operação geral da instalação.', 'Status of morgue capacity.': 'Estado da capacidade da morgue.', 'Status of operations of the emergency department of this hospital.': 'Estado das operações do Departamento de Emergência deste hospital.', 'Status of security procedures/access restrictions in the hospital.': 'Estado dos procedimentos de segurança/Restrições de Acesso no hospital.', 'Status of the operating rooms of this hospital.': 'Status das salas de operação deste hospital.', 'Status updated': 'Status atualizado', 'Steel frame': 'Estrutura de aço', 'Stolen': 'Roubado', 'Store spreadsheets in the Eden database': 
'Arquivar as planilhas no banco de dados Eden', 'Storeys at and above ground level': 'Andares e no nível do solo acima', 'Storm Force Wind': 'Tempestade Força Vento', 'Storm Surge': 'ressaca', 'Stowaway': 'Penetra', 'Street Address': 'Endereço residencial', 'Strong Wind': 'vento forte', 'Structural Hazards': 'riscos estruturais', 'Structural': 'estrutural', 'Style Field': 'Estilo do Campo', 'Style Values': 'Estilo dos Valores', 'Sub-type': 'Subtipo', 'Subject': 'assunto', 'Submission successful - please wait': 'envio bem sucedido - por favor aguarde', 'Submission successful - please wait...': 'envio bem sucedido - por favor aguarde...', 'Submit New (full form)': 'Submeter Novo (formulário completo)', 'Submit New (triage)': 'Submeter novo (triagem)', 'Submit New': 'Submeter Novamente', 'Submit a request for recovery': 'envie um pedido de recuperação', 'Submit new Level 1 assessment (full form)': 'Submeter novo nível 1 de avaliação (formulário completo)', 'Submit new Level 1 assessment (triage)': 'Submeter novo nível 1 de avaliação (triagem)', 'Submit new Level 2 assessment': 'Submeter novo nível 2 de avaliação', 'Subscription Details': 'Detalhes da Assinatura', 'Subscription added': 'Assinatura Incluída', 'Subscription deleted': 'Assinatura Excluída', 'Subscription updated': 'Assinatura ATUALIZADO', 'Subscriptions': 'assinaturas', 'Subsector Details': 'Detalhes de subsetor', 'Subsector added': 'Subsetor incluído', 'Subsector deleted': 'Subsetor excluído', 'Subsector updated': 'Subsetor atualizado', 'Subsector': 'Subsetor', 'Subsectors': 'Subsetores', 'Subsistence Cost': 'custo de subsistencia', 'Suburb': 'Subúrbio', 'Suggest not changing this field unless you know what you are doing.': 'Sugerimos não alterar esse campo a menos que você saiba o que está fazendo.', 'Summary by Administration Level': 'Resumo por Nível de Administração', 'Summary': 'Sumário', 'Sunday': 'Domingo', 'Supplies': 'Suprimentos', 'Support Request': 'Pedido de Suporte', 'Support Requests': 'Pedidos de Suporte', 'Supports the decision making of large groups of Crisis Management Experts by helping the groups create ranked list.': 'Suporta a tomada de decisão de grandes grupos de Especialistas em Gestão de Crises ajudando os grupos a criar listas de classificados.', 'Sure you want to delete this object?': 'Tem certeza que você quer excluir este objeto?', 'Surgery': 'Cirurgia', 'Survey Answer Details': 'Detalhes da Resposta da Pesquisa', 'Survey Answer added': 'Incluído Resposta da Pesquisa', 'Survey Answer deleted': 'Excluído a Resposta da Pesquisa', 'Survey Answer updated': 'Resposta da Pesquisa atualizada', 'Survey Answer': 'Resposta da Pesquisa', 'Survey Module': 'Módulo de Pesquisa', 'Survey Name': 'Nome da Pesquisa', 'Survey Question Details': 'Detalhes da Pergunta de Pesquisa', 'Survey Question Display Name': 'Nome da pergunta de pesquisa', 'Survey Question added': 'Pergunta de pesquisa incluída', 'Survey Question deleted': 'Pergunta de pesquisa excluída', 'Survey Question updated': 'Pergunta de pesquisa atualizada', 'Survey Question': 'Questão de Pesquisa de Opinião', 'Survey Section Details': 'Detalhes de Seção de Pesquisa', 'Survey Section Display Name': 'Seção de pesquisa do nome de exibição', 'Survey Section added': 'Seção de Pesquisa incluída', 'Survey Section deleted': 'Seção de Pesquisa excluída', 'Survey Section updated': 'Seção de pesquisa atualizada', 'Survey Section': 'Seção da Pesquisa de Opinião', 'Survey Series Details': 'Série de Pesquisa Detalhes', 'Survey Series Name': 'Nome de Série de Pesquisa', 
'Survey Series added': 'Série de Pesquisa incluída', 'Survey Series deleted': 'Série de Pesquisa excluída', 'Survey Series updated': 'Série de Pesquisa atualizada', 'Survey Series': 'Série de Pesquisa', 'Survey Template Details': 'Definir detalhes do formulário', 'Survey Template added': 'Modelo de Pesquisa incluído', 'Survey Template deleted': 'Modelo de Pesquisa excluído', 'Survey Template updated': 'Definição de formulário actualizada', 'Survey Template': 'Modelo de Pesquisa de Opinião', 'Survey Templates': 'Definir formulários', 'Symbology': 'Simbologia', 'Sync Conflicts': 'Conflitos de Sincronização', 'Sync History': 'Histórico de Sincronização', 'Sync Now': 'Sincronizar Agora', 'Sync Partners are instances or peers (SahanaEden, SahanaAgasti, Ushahidi, etc.) that you want to sync information with. Click on the link on the right to go the page where you can add sync partners, search for sync partners and modify them.': 'PARCEIROS DE Sincronização são instâncias ou PARES (SahanaEden, SahanaAgasti, Ushahidi, etc. ) que você deseja a informação de sincronização com. Clique no link sobre o direito de ir a página em que você pode incluir parceiros de sincronização, procurar por parceiros de sincronização e Modificá-las.', 'Sync Partners': 'Sincronizar parceiros', 'Sync Pools': 'Conjuntos de Sincronização', 'Sync Schedule': 'Planejamento de Sincronização', 'Sync Settings': 'Configurações de Sincronização', 'Sync process already started on': 'Processo de Sincronização já iniciado em', 'Synchronisation': 'Sincronização', 'Synchronization Conflicts': 'Conflitos de Sincronização', 'Synchronization Details': 'Detalhes de Sincronização', 'Synchronization History': 'Histórico de Sincronização', 'Synchronization Peers': 'Parceiros de Sincronização', 'Synchronization Settings': 'Configurações de sincronização', 'Synchronization allows you to share data that you have with others and update your own database with latest data from other peers. This page provides you with information about how to use the synchronization features of Sahana Eden': 'Sincronização permite compartilhar dados que você tenha com outros e Atualizar seu próprio banco de dados com informações recentes de outros parceiros. 
Esta página fornece informações sobre como utilizar os recursos de sincronização de Sahana Éden', 'Synchronization not configured.': 'Sincronização não Configurada.', 'Synchronization settings updated': 'Configurações de sincronização atualizadas', 'Synchronization': 'Sincronização', 'Syncronisation History': 'Histórico De Sincronização', 'Take shelter in place or per <instruction>': 'Abrigue-se no local ou por', 'Task Details': 'Detalhes da Tarefa', 'Task List': 'Lista de tarefas', 'Task Status': 'Status da tarefa', 'Task added': 'Task Inclusa', 'Task deleted': 'Tarefa excluída', 'Task updated': 'Tarefa atualizada', 'Tasks': 'Tarefas', 'Team Description': 'Descrição da Equipe', 'Team Details': 'Detalhes da Equipe', 'Team ID': 'ID da Equipe', 'Team Id': 'Id da Equipe', 'Team Leader': 'Líder de Equipe', 'Team Member added': 'Membro da equipe incluído', 'Team Members': 'Membros da equipe', 'Team Name': 'Nome da equipe', 'Team Type': 'Tipo de equipe', 'Team added': 'Equipe incluída', 'Team deleted': 'Equipe excluída', 'Team updated': 'Equipa actualizada', 'Team': 'Equipe', 'Teams': 'Equipes', 'Technical testing only, all recipients disregard': 'Apenas teste técnico, todos os recipientes ignorem', 'Telecommunications': 'Telecomunicações', 'Telephone': 'Telefone', 'Telephony': 'Telefonia', 'Temp folder %s not writable - unable to apply theme!': 'PASTA Temp%s não gravável-impossível aplicar tema!', 'Template file %s not readable - unable to apply theme!': 'Modelo% arquivo não é Legível-impossível aplicar tema!', 'Templates': 'modelos', 'Term for the fifth-level within-country administrative division (e.g. a voting or postcode subdivision). This level is not often used.': 'Termo para o 5º nível de divisão administrativa nacional (por exemplo, uma subdivisão de código postal ou de zona de votação). Este nível não é frequentemente utilizado.', 'Term for the fourth-level within-country administrative division (e.g. Village, Neighborhood or Precinct).': 'Termo para o 4º nível de divisão administrativa nacional(por exemplo, vila, bairro ou distrito).', 'Term for the primary within-country administrative division (e.g. State or Province).': 'Prazo para a principal divisão administrativa dentro do país (i.e. Estado ou Distrito).', 'Term for the secondary within-country administrative division (e.g. District or County).': 'Prazo para a Secundária divisão administrativa dentro do país (por exemplo, Bairro ou Município).', 'Term for the secondary within-country administrative division (e.g. District).': 'Prazo para a Secundária divisão administrativa dentro do país (i.e. Bairro).', 'Term for the third-level within-country administrative division (e.g. City or Town).': 'Prazo para o 3ᵉʳ nível de divisão administrativa dentro do país (por exemplo, Cidade ou Municipio).', 'Term for the top-level administrative division (i.e. Country).': 'Prazo para a divisão administrativa de nível superior (por exemplo País).', 'Term for the top-level administrative division (typically Country).': 'Prazo para a divisão administrativa de nível superior (geralmente País).', 'Territorial Authority': 'Autoridade territoriais', 'Terrorism': 'Terrorismo', 'Tertiary Server (Optional)': 'Servidor terciário (opcional)', 'Text Color for Text blocks': 'Cor de texto para os blocos de texto', 'Text before each Text Field (One per line)': 'Texto antes de cada campo de texto (um por linha)', 'Text': 'texto', 'Thank you for validating your email. 
Your user account is still pending for approval by the system administator (%s).You will get a notification by email when your account is activated.': 'Obrigado para validar seu e-mail. Sua conta de usuário ainda está pendente para aprovação pelo administrador do Sistema (%s). você receberá uma notificação por e-mail quando sua conta esteja ativada.', 'Thanks for your assistance': 'Obrigado por sua ajuda', 'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1 == db.table2.field2" results in a SQL JOIN.': 'O "query" é uma condição como "db.table1.field1==\'value\'". Algo como "db.table1.field1 == db.table2.field2" resulta em uma junção SQL.', 'The Area which this Site is located within.': 'A área que este Site está localizado', 'The Assessments module allows field workers to send in assessments.': 'O Modulo Avaliações permite aos trabalhadores de campo que enviem avaliações.', 'The Author of this Document (optional)': 'O autor deste documento (opcional)', 'The Building Asssesments module allows building safety to be assessed, e.g. after an Earthquake.': 'O módulo avaliações De Construção permite a segurança edifício a ser avaliada, por exemplo, depois de um terremoto.', 'The Camp this Request is from': 'O Alojamento neste pedido é de', 'The Camp this person is checking into.': 'O Alojamento que esta pessoa está se registrando.', 'The Current Location of the Person/Group, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'O local atual do Usuário/Grupo, que pode ser geral (para relatórios) ou precisa (para exibir em um mapa). Digite alguns caracteres para procurar nos locais disponíveis.', 'The Email Address to which approval requests are sent (normally this would be a Group mail rather than an individual). If the field is blank then requests are approved automatically if the domain matches.': 'O endereço de e-mail para onde os pedidos de aprovação são enviados (normalmente seria um correio de Grupo ao invés de um individual). Se o campo estiver em branco, os pedidos são aprovados automaticamente se o domínio corresponder.', 'The Incident Reporting System allows the General Public to Report Incidents & have these Tracked.': 'O Sistema de Comunicação de Incidentes permite o Público em Geral reportar incidentes & ter esses rastreados.', 'The Location the Person has come from, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'A Localização da Pessoa vem do, que pode ser geral (para relatórios) ou precisa (para exibir em um mapa). Digite alguns caracteres para procurar nos locais disponíveis.', 'The Location the Person is going to, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'O local que a pessoa vai, que pode ser genérico (para Relatórios) ou preciso (para exibir em um mapa). Digite alguns caracteres para procurar nos locais disponíveis.', 'The Media Library provides a catalog of digital media.': 'A Biblioteca de mídias fornece um catálogo de mídia digital.', 'The Messaging Module is the main communications hub of the Sahana system. It is used to send alerts and/or messages using SMS & Email to various groups and individuals before, during and after a disaster.': 'O módulo de mensagens é o hub de comunicação principal do sistema Sahana. 
É utilizado para enviar alertas e/ou mensagens utilizando o SMS & e-mail para diferentes grupos e indivíduos antes, durante e após um desastre.', 'The Organization Registry keeps track of all the relief organizations working in the area.': 'O registro Da Organização mantém controle de todos as organizações de apoio que trabalham na área.', 'The Organization Registry keeps track of all the relief organizations working in the disaster region. It captures not only the places where they are active, but also captures information on the range of projects they are providing in each area.': 'O registro da Organização mantém controle de todas organizações de ajuda trabalhando numa região de desastre. Ele captura não apenas os locais onde elas estão ativas, mas também captura informações sobre o conjunto de projetos que está fornecendo em cada região.', 'The Person currently filling this Role.': 'A pessoa atualmente preenchendo esta função.', 'The Project Tracking module allows the creation of Activities to meet Gaps in Needs Assessments.': 'O módulo acompanhamento do projeto permite a criação de atividades para preencher Lacunas nas avaliações de necessidades.', 'The Requests Management System is a central online repository where all relief organizations, relief workers, government agents and camp sites for displaced personnel can coordinate the supply of aid with their demand. It allows users to allocate the available resources to fulfill the demands effectively and efficiently.': 'O sistema De Gerenciamento De Pedidos é um repositório online central em todas as organizações de ajuda, trabalhadores de assistência, agentes do governo e sites de acampamento para a equipe de refugiados pode coordenar o fornecimento da ajuda com seu pedido. Ela permite que usuários aloquem os recursos disponíveis para suprir as demandas de forma efetiva e eficiente.', 'The Role this person plays within this hospital.': 'A Função desta pessoa neste hospital.', 'The Role to which this Role reports.': 'A função à qual essa função responde.', 'The Shelter Registry tracks all shelters and stores basic details regarding them. It collaborates with other modules to track people associated with a shelter, the services available etc.': 'O registro do Abrigo rastreia todos os detalhes básicos abrigos e armazena sobre eles. 
Ele colabora com outros módulos para rastrear as pessoas associadas com um abrigo, os serviços disponíveis etc.', 'The Shelter this Request is from (optional).': 'O pedido este Abrigo é de (opcional).', 'The Shelter this Request is from': 'O pedido deste abrigo é de', 'The Shelter this person is checking into.': 'O abrigo esta pessoa está verificando no.', 'The URL for the GetCapabilities of a WMS Service whose layers you want accessible via the Map.': 'A URL para o GetCapabilities de um serviço WMS cujas camadas você deseja acessíveis através do mapa.', 'The URL for the GetCapabilities page of a Web Map Service (WMS) whose layers you want available via the Browser panel on the Map.': 'A URL para a página do GetCapabilities de um Web Map Service (WMS), cujas camadas que você deseja disponíveis através do painel do navegador no Mapa.', 'The URL of your web gateway without the post parameters': 'A URL de seu gateway da web sem os parâmetros post', 'The URL to access the service.': 'A URL para acessar o serviço.', 'The Unique Identifier (UUID) as assigned to this facility by the government.': 'O Idenfificador Único (UUID) conforme designado pelo governo para esta filial.', 'The asset must be assigned to a site OR location.': 'O ativo deve ser assinalado para um site ou local.', 'The attribute which is used for the title of popups.': 'O atributo que é usado para o título de popups.', 'The attribute within the KML which is used for the title of popups.': 'O Atributo dentro do KML que é utilizado para o título dos pop-ups.', 'The attribute(s) within the KML which are used for the body of popups. (Use a space between attributes)': 'O Atributo(s) no KML que são utilizados para o corpo dos pop-ups. ( utilizar um espaço entre atributos )', 'The body height (crown to heel) in cm.': 'A altura do corpo (cabeça até o calcanhar) em cm.', 'The contact person for this organization.': 'A pessoa de contato nessa organização.', 'The country the person usually lives in.': 'O país que a pessoa vive habitualmente', 'The default Organization for whom this person is acting.': 'A Organização padrão para quem esta pessoa está atuando.', 'The default Organization for whom you are acting.': 'A Organização padrão para quem você está atuando.', 'The duplicate record will be deleted': 'O registro duplicado será excluído', 'The first or only name of the person (mandatory).': 'O primeiro nome ou único nome da pessoa (obrigatório).', 'The form of the URL is http://your/web/map/service?service=WMS&request=GetCapabilities where your/web/map/service stands for the URL path to the WMS.': 'O formulário da URL é http://your/web/map/service?service=WMS&request=GetCapabilities where your/web/map/service que representa o caminho da URL para o WMS.', 'The language you wish the site to be displayed in.': 'O idioma que você deseja que o site seja exibido.', 'The last known location of the missing person before disappearance.': 'A última localização conhecida da pessoa desaparecida antes do desaparecimento.', 'The list of Brands are maintained by the Administrators.': 'A lista de Marcas serão mantidas pelos administradores.', 'The list of Catalogs are maintained by the Administrators.': 'A lista de catálogos é mantida pelos administradores.', 'The list of Item categories are maintained by the Administrators.': 'A lista de categorias dos itens são mantidas pelos administradores.', 'The map will be displayed initially with this latitude at the center.': 'O mapa será exibido inicialmente com esta latitude no centro.', 'The map will be 
displayed initially with this longitude at the center.': 'O mapa será exibido inicialmente com esta longitude no centro.', 'The minimum number of features to form a cluster.': 'O número mínimo de recursos para formar um cluster.', 'The name to be used when calling for or directly addressing the person (optional).': 'O nome a ser usado ao chamar por ou diretamente endereçar a pessoa (opcional).', 'The next screen will allow you to detail the number of people here & their needs.': 'A próxima tela permitirá que você detalhe o número de pessoas aqui e as suas necessidades.', 'The number of Units of Measure of the Alternative Items which is equal to One Unit of Measure of the Item': 'O número de unidades de medida dos Itens alternativos é igual a uma unidade de medida do Item', 'The number of pixels apart that features need to be before they are clustered.': 'O número de separado de pixels de funcionalidades tem que ser antes que eles sejam agrupados.', 'The number of tiles around the visible map to download. Zero means that the 1st page loads faster, higher numbers mean subsequent panning is faster.': 'O número de títulos em torno do mapa visível para fazer download. Zero significa que a primeira página carrega mais rápido, números maiores que zero significam que as paginas seguintes são mais rápida.', 'The person at the location who is reporting this incident (optional)': 'A pessoa no local que está relatando este incidenten (opcional)', 'The person reporting the missing person.': 'A pessoa reportando o desaparecimento de alguem', 'The post variable containing the phone number': 'A variavel post contendo o numero de telefone', 'The post variable on the URL used for sending messages': 'A variável post no URL é utilizada para enviar mensagens', 'The post variables other than the ones containing the message and the phone number': 'As variáveis post diferentes das que contém a mensagem e o número de telefone', 'The serial port at which the modem is connected - /dev/ttyUSB0, etc on linux and com1, com2, etc on Windows': 'A porta serial no qual o modem está conectado-/dev/ttyUSB0, etc. No linux e com1, com2, etc. No Windows', 'The server did not receive a timely response from another server that it was accessing to fill the request by the browser.': 'O servidor não receber uma resposta oportuna de outro servidor que ele estava acessando para preencher o pedido pelo navegador.', 'The server received an incorrect response from another server that it was accessing to fill the request by the browser.': 'O servidor recebeu uma resposta incorreta a partir de outro servidor que ele estava acessando para preencher o pedido pelo navegador.', 'The site where this position is based.': 'O local onde esta posição se baseia.', 'The staff responsibile for Facilities can make Requests for assistance. Commitments can be made against these Requests however the requests remain open until the requestor confirms that the request is complete.': 'O pessoal responsável pelas Instalações podem fazer pedidos de assistência. 
Compromissos podem ser feitas em relação a esses pedidos no entanto os pedidos permanecem abertas até o SOLICITANTE confirma que o pedido foi concluído.', 'The subject event no longer poses a threat or concern and any follow on action is described in <instruction>': 'O acontecimento já não representa uma ameaça ou preocupação e a ação a ser tomada é descrita em<instruction>', 'The time at which the Event started.': 'O momento em que o evento começou.', 'The title of the WMS Browser panel in the Tools panel.': 'O título do painel do navegador WMS em ferramentas.', 'The token associated with this application on': 'O token associado a este aplicativo em', 'The unique identifier which identifies this instance to other instances.': 'O indentificador único diferencia esta instância de outras.', 'The way in which an item is normally distributed': 'O modo em que um item é normalmente distribuído', 'The weight in kg.': 'O peso em quilogramas.', 'The': 'O', 'Theme Details': 'Detalhes do Tema', 'Theme added': 'Tema incluído', 'Theme deleted': 'Tema excluído', 'Theme updated': 'Tema atualizado', 'Theme': 'Tema', 'Themes': 'Temas', 'There are errors': 'Há erros', 'There are insufficient items in the Inventory to send this shipment': 'não há itens suficientes no armazém para o envio desse carregamento', 'There are multiple records at this location': 'Há vários registros neste local', 'There are not sufficient items in the Inventory to send this shipment': 'não há itens suficientes no inventário para enviar esse carregamento', 'There is no address for this person yet. Add new address.': 'Não há endereço para esta pessoa ainda. Adicionar novo endereço.', 'These are settings for Inbound Mail.': 'Estas são as configurações para Correio de entrada.', 'These are the Incident Categories visible to normal End-Users': 'Estes são as Categorias de incidentes visíveis para usuários finais normais.', 'These need to be added in Decimal Degrees.': 'estas precisam ser incluídas em graus decimais.', 'They': 'Eles', 'This Group has no Members yet': 'Sem membros registrados atualmente', 'This Team has no Members yet': 'Sem membros registrados atualmente', 'This appears to be a duplicate of': 'Isto parece ser duplicado de', 'This file already exists on the server as': 'Este arquivo já existe como no servidor', 'This is appropriate if this level is under construction. To prevent accidental modification after this level is complete, this can be set to False.': 'Isso é apropriado se esse nível estiver em construção. 
Para evitar modificação acidental após esse nível estar concluído, pode ser configurado como False.', 'This is the way to transfer data between machines as it maintains referential integrity.': 'Este é o caminho para a transferência de dados entre máquinas que mantém a integridade referencial.', 'This is the way to transfer data between machines as it maintains referential integrity...duplicate data should be removed manually 1st!': 'Este é o caminho para a transferência de dados entre máquinas que mantém a integridade referencial...duplicado dados devem ser removidos manualmente 1ᵉʳ!', 'This level is not open for editing.': 'Este nível não é aberto para edição.', 'This might be due to a temporary overloading or maintenance of the server.': 'Isso pode ser devido a uma sobrecarga temporária ou manutenção do servidor.', 'This module allows Inventory Items to be Requested & Shipped between the Inventories of Facilities.': 'Este módulo permite que itens de inventário sejam Solicitados & Enviados entre os Inventários das instalações.', 'This module allows you to plan scenarios for both Exercises & Events. You can allocate appropriate Resources (Human, Assets & Facilities) so that these can be mobilized easily.': 'Este módulo permite que você planeje cenários para os Exercícios & Eventos. Você pode alocar apropriado recursos (humanos, Ativos e Recursos) para que estes possam ser mobilizados facilmente.', 'This page shows you logs of past syncs. Click on the link below to go to this page.': 'Esta página mostra as logs das sincronizações passadas. Clique no link abaixo para ir para essa página.', 'This screen allows you to upload a collection of photos to the server.': 'Esta tela permite que você faça upload de um conjunto de fotografias para o servidor.', 'This setting can only be controlled by the Administrator.': 'Esta definicão só pode ser controlado pelo administrador.', 'This shipment has already been received.': 'Este carregamento já foi recebido.', 'This shipment has already been sent.': 'Este carregamento já foi enviado.', 'This shipment has not been received - it has NOT been canceled because can still be edited.': 'Este carregamento não foi recebido-ele não foi cancelado porque ainda pode ser editado.', 'This shipment has not been sent - it has NOT been canceled because can still be edited.': 'Este carregamento não foi enviado- ele não foi cancelado porque ainda pode ser editado.', 'This shipment will be confirmed as received.': 'Este carregamento será confirmado como recebido.', 'This value adds a small mount of distance outside the points. Without this, the outermost points would be on the bounding box, and might not be visible.': 'Esse valor inclui um pequeno valor de distância fora dos pontos. Sem isto, os pontos mais afastados estariam na caixa delimitadora, e podem não estar visíveis.', 'This value gives a minimum width and height in degrees for the region shown. Without this, a map showing a single point would not show any extent around that point. After the map is displayed, it can be zoomed as desired.': 'Este valor fornece uma largura e altura minimas em graus para a região mostrada. Sem isto, um mapa que mostre um ponto único não mostraria nenhuma extensão ao redor desse ponto. 
Depois que o mapa for exibido, pode ser ampliado, conforme desejado.', 'Thunderstorm': 'Trovoada', 'Thursday': 'Quinta-feira', 'Ticket Details': 'Detalhes do bilhete', 'Ticket ID': 'ID do Bilhete', 'Ticket added': 'Bilhete incluído', 'Ticket deleted': 'Bilhete removido', 'Ticket updated': 'Bilhete atualizado', 'Ticket': 'Bilhete', 'Ticketing Module': 'Módulo de bilhetes', 'Tickets': 'Bilhetes', 'Tilt-up concrete': 'Inclinar concreto', 'Timber frame': 'Quadro de madeira', 'Timeline Report': 'Relatório de períodos de tempo', 'Timeline': 'Prazo', 'Title to show for the Web Map Service panel in the Tools panel.': 'Título para mostrar o painel de serviço de Mapa da Web no painel de Ferramentas.', 'Title': 'título', 'To Location': 'Localidade de destino', 'To Person': 'Para Pessoa', 'To begin the sync process, click the button on the right =>': 'Para iniciar o processo de Sincronização, clique no botão à direita.', 'To begin the sync process, click this button =>': 'Para iniciar o processo de Sincronização, clique neste botão.', 'To create a personal map configuration, click': 'Para criar uma configuração do mapa pessoal, clique', 'To edit OpenStreetMap, you need to edit the OpenStreetMap settings in models/000_config.py': 'Para editar OpenStreetMap, você precisa editar as configurações do OpenStreetMap em models/000_config.py', 'To search by job title, enter any portion of the title. You may use % as wildcard.': 'Para pesquisar por título, digite qualquer parte do título. Pode utilizar o % como um substituto para qualquer caracter.', 'To variable': 'Para variável', 'To': 'para', 'Tools': 'ferramentas', 'Tornado': 'tornado', 'Total # of Target Beneficiaries': 'Nº Total de Beneficiários De Destino', 'Total # of households of site visited': 'Nº Total de famílias de site Visitado', 'Total Beds': 'Total de Camas', 'Total Beneficiaries': 'Total de Beneficiários', 'Total Cost per Megabyte': 'Custo Total por Megabyte', 'Total Cost per Minute': 'Custo Total por Minuto', 'Total Monthly Cost': 'Custo Total mensal', 'Total Monthly Cost:': 'Custo Total mensal:', 'Total Monthly': 'Total Mensal', 'Total One-time Costs': 'Total Um tempo de Custos', 'Total Persons': 'Totalizar Pessoas', 'Total Recurring Costs': 'Totalizar Custos Recorrentes', 'Total Unit Cost': 'Total do custo unitário', 'Total Unit Cost:': 'Custo Unitário Total:', 'Total Units': 'Total de unidades', 'Total gross floor area (square meters)': 'Total de área bruta (metros quadrados)', 'Total number of beds in this hospital. Automatically updated from daily reports.': 'Número Total de leitos neste hospital. 
Atualizado automaticamente a partir de relatórios diários.', 'Total number of houses in the area': 'Número Total de casas na área', 'Total number of schools in affected area': 'Número Total de escolas em área afetada', 'Total population of site visited': 'População total do local visitado', 'Totals for Budget:': 'Totais para Orçamento:', 'Totals for Bundle:': 'Totais para Pacote:', 'Totals for Kit:': 'Totais para Kit:', 'Tourist Group': 'Grupo turístico', 'Town': 'Cidade', 'Traces internally displaced people (IDPs) and their needs': 'Rastreia pessoas deslocadas internamente (PDI) e suas necessidades', 'Tracing': 'Rastreio', 'Track Details': 'Detalhes do rastreio', 'Track deleted': 'Rastreio excluído', 'Track updated': 'Rastreamento atualizado', 'Track uploaded': 'Rastreamento enviado', 'Track with this Person?': 'Rastrear com esta pessoa?', 'Track': 'Rastrear', 'Tracking of Projects, Activities and Tasks': 'Rastreamento de projetos, atividades e tarefas', 'Tracking of basic information on the location, facilities and size of the Shelters': 'Rastreamento de informações básicas sobre a localização, instalações e tamanho dos abrigos', 'Tracks the location, distibution, capacity and breakdown of victims in Shelters': 'Rastreia o local, distribuição, capacidade e discriminação das vítimas em Abrigos', 'Traffic Report': 'Relatório de tráfego', 'Training Course Catalog': 'Catálogo de cursos de treinamento', 'Training Details': 'Detalhes do treinamento', 'Training added': 'Treinamento incluído', 'Training deleted': 'Treinamento excluído', 'Training updated': 'Treinamento atualizado', 'Training': 'Treinamento', 'Trainings': 'Treinamentos', 'Transit Status': 'Status do Trânsito', 'Transit': 'Trânsito', 'Transition Effect': 'Efeito de Transição', 'Transparent?': 'Transparente?', 'Transportation assistance, Rank': 'Assistência de transporte, Classificação', 'Trauma Center': 'Centro de traumas', 'Travel Cost': 'Custo da Viagem', 'Tropical Storm': 'Tempestade Tropical', 'Tropo Messaging Token': 'Token de Mensagens do Tropo', 'Tropo Settings': 'Configurações do Tropo', 'Tropo Voice Token': 'Token de Voz do Tropo', 'Tropo settings updated': 'Configurações do Tropo atualizadas', 'Tropo': 'Tropo', 'Truck': 'Caminhão', 'Try checking the URL for errors, maybe it was mistyped.': 'Tente verificar se existem erros na URL, talvez tenha sido um erro de digitação.', 'Try hitting refresh/reload button or trying the URL from the address bar again.': 'Tente apertar o botão atualizar/recarregar ou tente a URL a partir da barra de endereços novamente.', 'Try refreshing the page or hitting the back button on your browser.': 'Tente atualizar a página ou apertar o botão voltar em seu navegador.', 'Tuesday': 'Terça-feira', 'Twitter ID or #hashtag': 'ID Twitter ou #hashtag', 'Twitter Settings': 'Configurações do Twitter', 'Type of Construction': 'Tipo de Construção', 'Type of water source before the disaster': 'Tipo de fonte de água antes do desastre', 'Type': 'Tipo', 'UID': 'UID', 'UN': 'ONU', 'URL': 'URL', 'Un-Repairable': 'Irreparável', 'Unable to parse CSV file!': 'Não é possível analisar o arquivo CSV!', 'Understaffed': 'Com falta de pessoal', 'Unidentified': 'Não identificado', 'Unit Cost': 'Custo por unidade', 'Unit added': 'Unidade incluída', 'Unit deleted': 'Unidade excluída', 'Unit of Measure': 'Unidade de medida', 'Unit updated': 'Unidade atualizada', 'Units': 'Unidades', 'Unknown Peer': 'Peer desconhecido', 'Unknown type of facility': 'Tipo desconhecido de instalação', 'Unknown': 'Desconhecido',
'Unreinforced masonry': 'Alvenaria obras', 'Unresolved Conflicts': 'Conflitos não resolvidos', 'Unsafe': 'Inseguro', 'Unselect to disable the modem': 'Desmarcar para desativar o modem', 'Unsent': 'não enviado', 'Unsupported data format!': 'Formato de dados não Suportado!', 'Unsupported method!': 'Método não Suportado!', 'Update Activity Report': 'Atualizar Relatório de atividade', 'Update Cholera Treatment Capability Information': 'Atualizar informações de capacidade de tratamento de Cólera', 'Update Request': 'Atualizar Pedido', 'Update Service Profile': 'Atualizar Perfil de Serviço', 'Update Status': 'Status da Atualização', 'Update Task Status': 'Atualizar Status da Tarefa', 'Update Unit': 'Atualizar Unidade', 'Update if Master': 'Atualizar se for o principal', 'Update if Newer': 'Atualizar se Mais Recente', 'Update your current ordered list': 'ATUALIZE a seu atual lista ordenada', 'Update': 'atualização', 'Updated By': 'Atualizado por', 'Upload Photos': 'Fazer Upload de Fotos', 'Upload Spreadsheet': 'Fazer atualizacao de Planilha', 'Upload Track': 'Pista de carregamento', 'Upload a Spreadsheet': 'Fazer Upload de uma planilha', 'Upload an image file (bmp, gif, jpeg or png), max. 300x300 pixels!': 'Fazer Upload de um arquivo de imagem (bmp, gif, jpeg ou png), máx. 300x300 pixels!', 'Upload an image file here.': 'Fazer atualizacao de um arquivo de imagem aqui.', 'Upload an image, such as a photo': 'Fazer Upload de uma imagem, como uma foto', 'Urban Fire': 'Incêndio urbano', 'Urban area': 'Zona Urbana', 'Urgent': 'Urgente', 'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Utilize (...)&(...) para e, (...)|(...) ou para, e ~(...) para não para construir consultas mais complexas.', 'Use Geocoder for address lookups?': 'Utiliza Geocodificador para consultas de endereços?', 'Use default': 'usar o padrão', 'Use these links to download data that is currently in the database.': 'Use estes links para fazer o download de dados actualmente na base de dados.', 'Used by IRS & Assess': 'Utilizado pela Receita Federal & Avaliar', 'Used in onHover Tooltip & Cluster Popups to differentiate between types.': 'Utilizado em onHover De Dicas & Cluster Popups para diferenciar entre tipos.', 'Used to build onHover Tooltip & 1st field also used in Cluster Popups to differentiate between records.': 'Utilizado para construir onHover Dicas & primeiro campo também utilizado no Popups Cluster para diferenciar entre os registros.', 'Used to check that latitude of entered locations is reasonable. May be used to filter lists of resources that have locations.': 'Usado para verificar latitude de locais inseridos é razoável. Pode ser utilizado para filtrar listas de recursos que possuem locais.', 'Used to check that longitude of entered locations is reasonable. May be used to filter lists of resources that have locations.': 'Usado para verificar que longitude de locais inserido é razoável. 
Pode ser utilizado para filtrar listas de recursos que possuem locais.', 'Used to import data from spreadsheets into the database': 'Para importar dados utilizada a partir de planilhas no banco de dados', 'Used within Inventory Management, Request Management and Asset Management': 'Utilizado no gerenciamento de inventário, gerenciamento de Pedido e gerenciamento de ativos', 'User Account has been Disabled': 'Conta de Usuário foi Desativado', 'User Details': 'Detalhes do Usuário', 'User Management': 'gerenciamento do usuário', 'User Profile': 'Perfil do Utilizador', 'User Requests': 'Pedidos do Utilizador', 'User Updated': 'Utilizador actualizado', 'User added': 'Usuário Incluído', 'User already has this role': 'Usuário já tem essa função', 'User deleted': 'Usuário Excluído', 'User updated': 'Utilizador actualizado', 'User': 'usuário', 'Username': 'userName', 'Users removed': 'Utilizadores removidos', 'Users': 'usuários', 'Uses the REST Query Format defined in': 'Utiliza o formato de consulta REST definido em', 'Utilities': 'Serviços Públicos', 'Utility, telecommunication, other non-transport infrastructure': 'Serviços Públicos, telecomunicações, outra infra-estrutura não-transporte', 'Vacancies': 'Vagas', 'Value': 'value', 'Various Reporting functionalities': 'Diversas funcionalidades de relatório', 'Vehicle Crime': 'Roubo/Furto de veículo', 'Vehicle Types': 'Tipos de veículo', 'Vehicle': 'veículo', 'Verification Status': 'Status de verificação', 'Verified?': 'Verificado?', 'Verify password': 'Verificar senha', 'Very Good': 'Muito bom', 'Very High': 'muito alto', 'View Alerts received using either Email or SMS': 'Visualizar alertas utilizando quer o correio electrónico quer SMS.', 'View All': 'Visualizar todos', 'View Error Tickets': 'Ver bilhetes de erro', 'View Fullscreen Map': 'Visualização Inteira Mapa', 'View Image': 'Visualizar imagem', 'View Items': 'Ver itens', 'View On Map': 'Visualizar no mapa', 'View Outbox': 'Visualização Outbox', 'View Picture': 'Visualização de imagem', 'View Settings': 'Ver Configurações', 'View Tickets': 'Visualizar Bilhetes', 'View and/or update their details': 'Visualizar e/ou actualizar os seus detalhes', 'View or update the status of a hospital.': 'VISUALIZAR ou atualizar o status de um hospital.', 'View pending requests and pledge support.': 'Visualizar pedidos pendentes e suporte promessa.', 'View the hospitals on a map.': 'Visualizar os hospitais em um mapa.', 'View/Edit the Database directly': 'Visualizar/Editar o banco de dados diretamente', 'Village Leader': 'Líder da Aldeia', 'Village': 'Vila', 'Visible?': 'Visível?', 'Visual Recognition': 'Reconhecimento visual', 'Volcanic Ash Cloud': 'Nuvem de cinzas vulcânicas', 'Volcanic Event': 'Evento vulcânico', 'Volunteer Availability': 'Disponibilidade de Voluntário', 'Volunteer Details': 'Detalhes do voluntário', 'Volunteer Information': 'Voluntário Informações', 'Volunteer Management': 'Gestão de voluntário', 'Volunteer Project': 'Projeto voluntário', 'Volunteer Record': 'Voluntário Registro', 'Volunteer Request': 'Pedido voluntário', 'Volunteer added': 'Voluntário incluído', 'Volunteer availability added': 'Disponibilidade de voluntário incluída', 'Volunteer availability deleted': 'Disponibilidade de voluntário excluída', 'Volunteer availability updated': 'Disponibilidade de voluntário atualizada', 'Volunteer deleted': 'Voluntário excluído', 'Volunteer details updated': 'Atualização dos detalhes de voluntários', 'Volunteer updated': 'Voluntário atualizado', 'Volunteers List': 'Voluntários Lista', 
'Volunteers were notified!': 'Voluntários foram notificados!', 'Volunteers': 'Voluntários', 'Vote': 'voto', 'Votes': 'votos', 'WASH': 'LAVAR', 'WMS Browser Name': 'WMS Nome do Navegador', 'WMS Browser URL': 'WMS Navegador URL', 'Walking Only': 'Apenas andando', 'Wall or other structural damage': 'Parede ou outros danos estruturais', 'Warehouse Details': 'Detalhes do Armazém', 'Warehouse added': 'Warehouse incluído', 'Warehouse deleted': 'Deposito apagado', 'Warehouse updated': 'Warehouse ATUALIZADO', 'Warehouse': 'Depósito', 'Warehouses': 'Armazéns', 'Water Sanitation Hygiene': 'Saneamento de água', 'Water collection': 'Coleta de água', 'Water gallon': 'Galão de água', 'Water storage containers in households': 'Recipientes de armazenamento de água nos domicílios', 'Water supply': 'Abastecimento de água', 'Web Map Service Browser Name': 'Nome do mapa da Web navegador de serviços', 'Web Map Service Browser URL': 'Web Mapa Do navegador de Serviços URL', 'Website': 'WebSite', 'Weight (kg)': 'peso (kg)', 'Weight': 'peso', 'Welcome to the Sahana Portal at': 'Bem-vindo ao Portal Sahana em', 'Well-Known Text': 'Texto bem conhecido', 'Wheat': 'Trigo', 'When a map is displayed that focuses on a collection of points, the map is zoomed to show just the region bounding the points.': 'Quando o mapa é que exibido incide sobre um conjunto de pontos, o mapa é aproximado para mostrar apenas a região delimitadora dos pontos.', 'When reports were entered': 'Quando os relatórios foram Digitados', 'Whiskers': 'Bigodes', 'Who is doing what and where': 'Quem está a fazer o quê e onde', 'Who usually collects water for the family?': 'Quem habitualmente colecta água para a família ?', 'Width (m)': 'Largura (m)', 'Width': 'width', 'Wild Fire': 'Fogo Selvagem', 'Wind Chill': 'Vento Frio', 'Window frame': 'Esquadria de janela', 'Winter Storm': 'Tempestade de inverno', 'Women of Child Bearing Age': 'Mulheres da criança Tendo Idade', 'Women participating in coping activities': 'Mulheres que participam em lidar atividades', 'Women who are Pregnant or in Labour': 'Mulheres que esto grávidas ou no trabalho', 'Womens Focus Groups': 'Mulheres de Grupos Foco', 'Wooden plank': 'Tábua de madeira', 'Wooden poles': 'Postes de madeira', 'Working hours end': 'Horas de trabalho final', 'Working hours start': 'Horas de trabalho iniciar', 'Working or other to provide money/food': 'Trabalhando para outros para prover dinheiro / alimentos', 'X-Ray': 'Raio-X', 'Year built': 'Ano de construção', 'Year of Manufacture': 'Ano de fabricação', 'Yellow': 'amarelo', 'Yes': 'YES', 'You are a recovery team?': 'Você é uma equipe de recuperação?', 'You are attempting to delete your own account - are you sure you want to proceed?': 'Você está tentando excluir sua própria conta-Tem certeza de que deseja continuar?', 'You are currently reported missing!': 'Você está atualmente desaparecido!', 'You can change the configuration of synchronization module in the Settings section. This configuration includes your UUID (unique identification number), sync schedules, beacon service and so on. Click the following link to go to the Sync Settings page.': 'Você pode alterar a configuração do Módulo de Sincronização na seção configurações. Essa configuração inclui o seu UUID (número de identificação exclusivo), Planejamentos de Sincronização, serviço Farol e assim por diante. 
Clique no link a seguir para ir para a página Configurações de Sincronização.', 'You can click on the map below to select the Lat/Lon fields': 'Você pode clicar no mapa abaixo para selecionar os campos Lat/Lon', 'You can select the Draw tool': 'Pode selecionar a ferramenta Desenho', 'You can set the modem settings for SMS here.': 'Pode definir a configuração do modem SMS aqui.', 'You can use the Conversion Tool to convert from either GPS coordinates or Degrees/Minutes/Seconds.': 'Você pode utilizar a ferramenta de conversão para converter coordenadas de GPS ou graus/minutos/segundos.', 'You do no have permission to cancel this received shipment.': 'Você não tem permissão para cancelar o recebimento deste carregamento.', 'You do no have permission to cancel this sent shipment.': 'Você não tem permissão para cancelar o envio desse carregamento.', 'You do no have permission to make this commitment.': 'Você não tem permissão de fazer este compromisso.', 'You do no have permission to receive this shipment.': 'Você não tem permissão para receber este carregamento.', 'You do no have permission to send this shipment.': 'Você não tem permissão para enviar este carregamento.', 'You do not have permission for any facility to make a commitment.': 'Você não tem permissão em qualquer instalação para estabelecer um compromisso.', 'You do not have permission for any facility to make a request.': 'Você não tem permissão em qualquer instalação para fazer um pedido.', 'You do not have permission for any site to add an inventory item.': 'Você não tem permissão em qualquer site para incluir um item de inventário.', 'You do not have permission for any site to make a commitment.': 'Você não tem permissão em qualquer site para assumir um compromisso.', 'You do not have permission for any site to make a request.': 'Você não tem permissão em qualquer site para fazer um pedido.', 'You do not have permission for any site to perform this action.': 'Você não tem permissão em qualquer site para executar esta ação.', 'You do not have permission for any site to receive a shipment.': 'Você não tem permissão para qualquer site para receber um carregamento.', 'You do not have permission for any site to send a shipment.': 'Você não tem permissão em qualquer site para enviar um carregamento.', 'You do not have permission to cancel this received shipment.': 'Você não tem permissão para cancelar este carregamento recebido.', 'You do not have permission to cancel this sent shipment.': 'Você não tem permissão para cancelar essa remessa enviada.', 'You do not have permission to make this commitment.': 'Você não tem permissão para assumir este compromisso.', 'You do not have permission to receive this shipment.': 'Você não tem permissão para receber esta remessa.', 'You do not have permission to send a shipment from this site.': 'Você não tem permissão para enviar um carregamento a partir deste site.', 'You do not have permission to send this shipment.': 'Você não tem permissão para enviar este carregamento.', 'You have a personal map configuration. To change your personal configuration, click': 'Você tem uma configuração de mapa pessoal. 
Para alterar a sua configuração pessoal, clique', 'You have found a dead body?': 'Descobriu um cadáver ?', 'You must be logged in to register volunteers.': 'Você deve estar com login efetuado para registrar voluntários.', 'You must be logged in to report persons missing or found.': 'Você deve estar registrado para informar pessoas desaparecidas ou localizadas.', 'You must provide a series id to proceed.': 'Você deve fornecer um número de série para continuar.', 'You should edit Twitter settings in models/000_config.py': 'Você deve editar as definições do Twitter em modelos/000_config.py', 'Your current ordered list of solution items is shown below. You can change it by voting again.': 'Seu lista de itens de solução pedidos aparece abaixo. Você pode alterá-lo ao votar novamente.', 'Your post was added successfully.': 'O post foi incluído com êxito.', 'Your system has been assigned a unique identification (UUID), which other computers around you can use to identify you. To view your UUID, you may go to Synchronization -> Sync Settings. You can also see other settings on that page.': 'Uma identificação exclusiva (UUID) foi designada para o seu sistema e poderá ser usada por outros computadores ao seu redor para identificá-lo. Para visualizar o seu UUID, você pode ir para Sincronização -> configurações Sync. Você também pode ver outras configurações nesta página.', 'Zero Hour': 'Hora Zero', 'Zinc roof': 'Telhado de Zinco', 'Zoom Levels': 'Níveis de Zoom', 'active': 'ativo', 'added': 'incluído', 'all records': 'todos os registros', 'allows a budget to be developed based on staff & equipment costs, including any admin overheads.': 'Permite que um orçamento seja desenvolvido com base em despesas com o pessoal e equipamento, incluindo quaisquer despesas gerais administrativas.', 'allows for creation and management of surveys to assess the damage following a natural disaster.': 'permite a criação e gerenciamento de pesquisas para avaliar os danos após um desastre natural.', 'an individual/team to do in 1-2 days': 'Uma pessoa/Equipe para fazer em 1 Dias-2', 'assigned': 'designado', 'average': 'Na média', 'black': 'Preto', 'blond': 'Loiro', 'blue': 'azul', 'brown': 'Marrom', 'by': 'por', 'c/o Name': 'c/o Nome', 'can be used to extract data from spreadsheets and put them into database tables.': 'Pode ser utilizado para extrair dados de planilhas e colocá-los em tabelas de dados.', 'cancelled': 'CANCELADO', 'caucasoid': 'Caucasoid', 'check all': 'Verificar Tudo', 'click for more details': 'Clique para mais detalhes', 'completed': 'Concluído', 'confirmed': 'Confirmado', 'consider': 'considerar', 'curly': 'Encaracolado', 'currently registered': 'Atualmente registrados', 'daily': 'Diariamente', 'dark': 'Escuro', 'data uploaded': 'dados carregados', 'database %s select': '% de dados s SELECIONE', 'database': 'DATABASE', 'db': 'dB', 'deceased': 'Falecido', 'delete all checked': 'excluir todos marcados', 'deleted': 'excluídos', 'design': 'projecto', 'diseased': 'Doentes', 'displaced': 'Deslocadas', 'divorced': 'Divorciado', 'done!': 'Pronto!', 'duplicate': 'duplicar', 'edit': 'Editar', 'eg. 
gas, electricity, water': 'Exemplo: Gás, eletricidade, água', 'embedded': 'integrado', 'enclosed area': 'Área anexada', 'export as csv file': 'Exportar como arquivo cvs.', 'fat': 'Gordura', 'feedback': 'Retorno', 'female': 'Sexo Feminino', 'flush latrine with septic tank': 'esvaziar latrina com tanque séptico', 'food_sources': 'fuentes de alimento', 'forehead': 'testa', 'found': 'Localizado', 'from Twitter': 'do Twitter', 'green': 'verde', 'grey': 'cinza', 'here': 'aqui', 'high': 'Alta', 'hourly': 'Por hora', 'households': 'Membros da família', 'identified': 'identificado', 'ignore': 'Ignore', 'in Deg Min Sec format': 'GRAUS Celsius no formato Mín. Segundo', 'in GPS format': 'GPS no formato', 'inactive': 'inativo', 'injured': 'Feridos', 'insert new %s': 'inserir novo %s', 'insert new': 'inserir novo', 'invalid request': 'PEDIDO INVÁLIDO', 'invalid': 'inválido', 'is a central online repository where information on all the disaster victims and families, especially identified casualties, evacuees and displaced people can be stored. Information like name, age, contact number, identity card number, displaced location, and other details are captured. Picture and finger print details of the people can be uploaded to the system. People can also be captured by group for efficiency and convenience.': 'É um repositório central de informações em tempo real onde vítimas de desastres e seus familiares, especialmente casos isolados, refugiados e pessoas deslocadas podem ser abrigados. Informações como nome, idade, Contate o número de Bilhete de Identidade número, localização Deslocadas, e outros detalhes são capturados. Detalhes de impressão Imagem e Dedo de as pessoas possam ser transferidos por upload para o sistema. As pessoas podem também ser capturados pelo grupo por eficiência e conveniência.', 'is envisioned to be composed of several sub-modules that work together to provide complex functionality for the management of relief and project items by an organization. This includes an intake system, a warehouse management system, commodity tracking, supply chain management, fleet management, procurement, financial tracking and other asset and resource management capabilities': 'tem como visão ser composto de vários sub-módulos que interagem juntos a fim de fornecer funcionalidade complexa para o gerenciamento de itens de ajuda e projeto de uma organização. 
Isso inclui um sistema de admissão, um sistema de gestão de depósitos, rastreamento de mercadorias, gestão da cadeia de fornecimentos, de gestão da frota, aquisições, recursos de rastreamento financeiro de ativos e outros e gerenciamento de recursos', 'keeps track of all incoming tickets allowing them to be categorised & routed to the appropriate place for actioning.': 'Mantém controle de todos os bilhetes de entrada permitindo que sejam classificados & direcionados ao local apropriado para atuação.', 'latrines': 'privadas', 'leave empty to detach account': 'deixar em branco para desconectar a conta', 'legend URL': 'Legenda URL', 'light': 'luz', 'long': 'Longo', 'long>12cm': 'comprimento>12cm', 'low': 'baixo', 'male': 'masculino', 'manual': 'Manual', 'married': 'casado', 'medium': 'médio.', 'medium<12cm': 'médio<12cm', 'meters': 'metros', 'missing': 'ausente', 'module allows the site administrator to configure various options.': 'Módulo permite que o administrador do site configure várias opções.', 'module helps monitoring the status of hospitals.': 'Módulo ajuda monitorando o status de hospitais.', 'module provides a mechanism to collaboratively provide an overview of the developing disaster, using online mapping (GIS).': 'Módulo fornece um mecanismo para colaboração fornecem uma visão geral do desastre de desenvolvimento, utilização de mapeamento online (SIG).', 'mongoloid': 'Mongolóide', 'more': 'Mais', 'n/a': 'n/d', 'negroid': 'Negróide', 'never': 'Nunca', 'new record inserted': 'Novo registro inserido', 'new': 'Novo(a)', 'next 100 rows': 'próximas 100 linhas', 'none': 'nenhum', 'not accessible - no cached version available!': 'Não acessível-nenhuma versão em cache disponível!', 'not accessible - using cached version from': 'Não acessível-Utilizando versão em Cache', 'not specified': 'não especificado', 'num Zoom Levels': 'Num níveis de Zoom', 'obsolete': 'Obsoleto', 'on': 'Ligar', 'once': 'uma vez', 'open defecation': 'Abrir evacuação', 'optional': 'Optional', 'or import from csv file': 'ou importar a partir do arquivo csv', 'other': 'outros', 'over one hour': 'Mais de uma hora', 'people': 'pessoas', 'piece': 'parte', 'pit latrine': 'cova de latrina', 'pit': 'cova', 'postponed': 'Adiado', 'preliminary template or draft, not actionable in its current form': 'Modelo ou rascunho preliminar, não acionável em sua forma atual', 'previous 100 rows': '100 linhas anteriores', 'record does not exist': 'Registro não existe', 'record id': 'ID do Registro', 'red': 'vermelho', 'reported': 'relatado', 'reports successfully imported.': 'relatórios importados com êxito.', 'representation of the Polygon/Line.': 'Representação do polígono /Linha.', 'retired': 'Aposentado', 'river': 'Rio', 'see comment': 'Veja o comentário', 'selected': 'Selecionado', 'separated from family': 'Separados da família', 'separated': 'Separado', 'shaved': 'raspado', 'short': 'pequeno', 'short<6cm': 'pequeno<6cm', 'sides': 'lados', 'sign-up now': 'Inscreva-se agora', 'single': 'único', 'slim': 'estreito', 'specify': 'Especifique.', 'staff members': 'Membros da equipe', 'staff': 'equipe', 'state location': 'Localização do Estado', 'state': 'Estado', 'straight': 'reto', 'suffered financial losses': 'Sofreram perdas financeiras', 'tall': 'Altura', 'this': 'isto', 'to access the system': 'Para acessar o sistema', 'tonsure': 'tonsura', 'total': 'Total', 'tweepy module not available within the running Python - this needs installing for non-Tropo Twitter support!': 'Módulo tweepy não disponível com a execução Python-isto necessita da 
instalação para suporte a tropo Twitter!', 'unable to parse csv file': 'Não é possível analisar arquivo csv', 'uncheck all': 'Desmarcar Tudo', 'unidentified': 'IDENTIFICADO', 'unspecified': 'UNSPECIFIED', 'unverified': 'Não Verificado', 'updated': 'Atualizado', 'updates only': 'Apenas atualizações', 'verified': 'Verificado', 'volunteer': 'voluntário', 'volunteers': 'Voluntários', 'wavy': 'Serpentina', 'weekly': 'Semanalmente', 'white': 'branco', 'wider area, longer term, usually contain multiple Activities': 'maior área, maior prazo, contém usualmente múltiplas actividades', 'widowed': 'Viúvo', 'window': 'janela', 'within human habitat': 'Dentro do habitat humano', 'xlwt module not available within the running Python - this needs installing for XLS output!': 'Módulo Xlwt não disponível no módulo Python sendo executado - isto necessita ser instalado para saída XLS!', 'yes': 'YES', }
gnarula/eden_deployment
languages/pt-br.py
Python
mit
271535
[ "VisIt" ]
61782ddad1e94cc740ccc8d96facaefa030ce419a31197da64be36b52966f0fc
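A minimal, illustrative sketch (not part of the dataset row above) of one way a gettext-style mapping such as the languages/pt-br.py dictionary can be consumed: look the English message up in the table and fall back to the message itself when no translation exists. The two sample entries are copied from the dump; the translate helper and the __main__ demo are hypothetical.

# Illustrative only: lookup-with-fallback over a translation table like pt-br.py.
PT_BR = {
    'Warehouse': 'Depósito',      # entry copied from the dictionary above
    'Volunteers': 'Voluntários',  # entry copied from the dictionary above
}

def translate(msg, table=PT_BR):
    # Untranslated messages fall back to the original English string.
    return table.get(msg, msg)

if __name__ == '__main__':
    print(translate('Warehouse'))      # Depósito
    print(translate('Unknown label'))  # Unknown label (no translation -> fallback)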
# -*- coding: utf-8 -*- # Copyright (C) 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import argparse import os import libcst as cst import pathlib import sys from typing import (Any, Callable, Dict, List, Sequence, Tuple) def partition( predicate: Callable[[Any], bool], iterator: Sequence[Any] ) -> Tuple[List[Any], List[Any]]: """A stable, out-of-place partition.""" results = ([], []) for i in iterator: results[int(predicate(i))].append(i) # Returns trueList, falseList return results[1], results[0] class recommendationengineCallTransformer(cst.CSTTransformer): CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { 'collect_user_event': ('parent', 'user_event', 'uri', 'ets', ), 'create_catalog_item': ('parent', 'catalog_item', ), 'create_prediction_api_key_registration': ('parent', 'prediction_api_key_registration', ), 'delete_catalog_item': ('name', ), 'delete_prediction_api_key_registration': ('name', ), 'get_catalog_item': ('name', ), 'import_catalog_items': ('parent', 'input_config', 'request_id', 'errors_config', ), 'import_user_events': ('parent', 'input_config', 'request_id', 'errors_config', ), 'list_catalog_items': ('parent', 'page_size', 'page_token', 'filter', ), 'list_prediction_api_key_registrations': ('parent', 'page_size', 'page_token', ), 'list_user_events': ('parent', 'page_size', 'page_token', 'filter', ), 'predict': ('name', 'user_event', 'page_size', 'page_token', 'filter', 'dry_run', 'params', 'labels', ), 'purge_user_events': ('parent', 'filter', 'force', ), 'update_catalog_item': ('name', 'catalog_item', 'update_mask', ), 'write_user_event': ('parent', 'user_event', ), } def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: try: key = original.func.attr.value kword_params = self.METHOD_TO_PARAMS[key] except (AttributeError, KeyError): # Either not a method from the API or too convoluted to be sure. return updated # If the existing code is valid, keyword args come after positional args. # Therefore, all positional args must map to the first parameters. args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) if any(k.keyword.value == "request" for k in kwargs): # We've already fixed this file, don't fix it again. return updated kwargs, ctrl_kwargs = partition( lambda a: not a.keyword.value in self.CTRL_PARAMS, kwargs ) args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) request_arg = cst.Arg( value=cst.Dict([ cst.DictElement( cst.SimpleString("'{}'".format(name)), cst.Element(value=arg.value) ) # Note: the args + kwargs looks silly, but keep in mind that # the control parameters had to be stripped out, and that # those could have been passed positionally or by keyword. 
for name, arg in zip(kword_params, args + kwargs)]), keyword=cst.Name("request") ) return updated.with_changes( args=[request_arg] + ctrl_kwargs ) def fix_files( in_dir: pathlib.Path, out_dir: pathlib.Path, *, transformer=recommendationengineCallTransformer(), ): """Duplicate the input dir to the output dir, fixing file method calls. Preconditions: * in_dir is a real directory * out_dir is a real, empty directory """ pyfile_gen = ( pathlib.Path(os.path.join(root, f)) for root, _, files in os.walk(in_dir) for f in files if os.path.splitext(f)[1] == ".py" ) for fpath in pyfile_gen: with open(fpath, 'r') as f: src = f.read() # Parse the code and insert method call fixes. tree = cst.parse_module(src) updated = tree.visit(transformer) # Create the path and directory structure for the new file. updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) updated_path.parent.mkdir(parents=True, exist_ok=True) # Generate the updated source file at the corresponding path. with open(updated_path, 'w') as f: f.write(updated.code) if __name__ == '__main__': parser = argparse.ArgumentParser( description="""Fix up source that uses the recommendationengine client library. The existing sources are NOT overwritten but are copied to output_dir with changes made. Note: This tool operates at a best-effort level at converting positional parameters in client method calls to keyword based parameters. Cases where it WILL FAIL include A) * or ** expansion in a method call. B) Calls via function or method alias (includes free function calls) C) Indirect or dispatched calls (e.g. the method is looked up dynamically) These all constitute false negatives. The tool will also detect false positives when an API method shares a name with another method. """) parser.add_argument( '-d', '--input-directory', required=True, dest='input_dir', help='the input directory to walk for python files to fix up', ) parser.add_argument( '-o', '--output-directory', required=True, dest='output_dir', help='the directory to output files fixed via un-flattening', ) args = parser.parse_args() input_dir = pathlib.Path(args.input_dir) output_dir = pathlib.Path(args.output_dir) if not input_dir.is_dir(): print( f"input directory '{input_dir}' does not exist or is not a directory", file=sys.stderr, ) sys.exit(-1) if not output_dir.is_dir(): print( f"output directory '{output_dir}' does not exist or is not a directory", file=sys.stderr, ) sys.exit(-1) if os.listdir(output_dir): print( f"output directory '{output_dir}' is not empty", file=sys.stderr, ) sys.exit(-1) fix_files(input_dir, output_dir)
googleapis/python-recommendations-ai
scripts/fixup_keywords.py
Python
apache-2.0
6999
[ "VisIt" ]
61b51e55d08a272f8290a206f18840fd379e292a21fe65fd1ba0d77de23c8ce0
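A hedged usage sketch (not part of the dataset row above) showing what scripts/fixup_keywords.py is meant to do to a single client call: positional arguments of an API method are folded into a request dict keyword argument. It assumes the script is importable as fixup_keywords; the client variable name and the resource path are made up for illustration.

# Illustrative only: apply the transformer from the file above to one call site.
import libcst as cst
from fixup_keywords import recommendationengineCallTransformer  # assumes the script is on sys.path

src = "client.delete_catalog_item('catalogs/c1/catalogItems/i1')\n"
tree = cst.parse_module(src)
updated = tree.visit(recommendationengineCallTransformer())
print(updated.code)
# Intended rewrite, roughly:
#   client.delete_catalog_item(request={'name': 'catalogs/c1/catalogItems/i1'})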
#!/usr/bin/python ############################## ## ## MLTreeMap TSN v. 0.0 ## ############################## try: import sys import os from os import path, _exit import shutil import re import glob import subprocess from optparse import OptionParser import time except: print """ Could not load some user defined module functions""" print """ """ print traceback.print_exc(10) sys.exit(3) def os_type(): """Return the operating system of the user.""" x = sys.platform if x: hits = re.search(r'darwin', x, re.I) if hits : return 'mac' hits = re.search(r'win', x, re.I) if hits : return 'win' hits = re.search(r'linux', x, re.I) if hits: return 'linux' def pathDelim(): """Return the path deliminator based on the operating system of the user.""" ostype = os_type() if ostype == 'win': return "\\" if ostype in ['linux', 'mac']: return "/" PATHDELIM = str(pathDelim()) class Autovivify(dict): """In cases of Autovivify objects, enable the referencing of variables (and sub-variables) without explicitly declaring those variables beforehand.""" def __getitem__(self, item): try: return dict.__getitem__(self, item) except KeyError: value = self[item] = type(self)() return value # TK class BookKeeping: we will implement a singleton class for bookkeeping parser = None def createParser(): global parser epilog = "MLTreeMap step searches for a set of 40 universally present COGs as marker genes and taxonomically identifies some sequences in the Tree of Life." epilog = re.sub(r'\s+', ' ', epilog) """Returns the parser to interpret user options.""" parser = OptionParser(description=epilog) parser.add_option('-i', '--input', dest='input', help='your sequence input file') parser.add_option('-b', '--bootstraps', dest='bootstraps', default=0, type=int, help='the number of Bootstrap replicates') parser.add_option('-c', '--cluster', dest='cluster', default=0, choices=[0,'s'], help='use a computer cluster? (0 = no cluster; s = sun grid)') parser.add_option('-f', '--phylogeny', dest='phylogeny', default='v', choices=['v','p'], help='RAxML algorithm (v = Maximum Likelihood; p = Maximum Parsimony)') parser.add_option('-g', '--gblocks', dest='gblocks', default=50, type=int, help='minimal sequence length after Gblocks') parser.add_option('-l', '--filelength', dest='filelength', default=2000, type=int, help='long input files will be splint into files containing L sequences each') parser.add_option('-m', '--memory', dest='memory', default=0, type=int, help='minimum memory on a sungrid_cluster in GB') parser.add_option('-o', '--output', dest='output', default='output/', help='output directory') parser.add_option('-s', '--bitscore', dest='bitscore', default=60, type=int, help='minimum bitscore for the blast hits') parser.add_option('-t', '--reftree', dest='reftree', default='p', choices=['p','g','i'], help='phylogenetic reference tree (p = MLTreeMap reference tree; g = GEBA reference tree; i = fungi tree)') parser.add_option('-r', '--reftype', dest='reftype', default='n', choices=['a','n'], help='the type of input sequences (a = Amino Acid; n = Nucleotide)') parser.add_option('-e', '--executables', dest='executables', default='sub_binaries', help='locations of executables (e.g. 
blastx, Gblocks, etc.)') parser.add_option('-x', '--mltreemap', dest='mltreemap', default = os.path.abspath(os.path.dirname(os.path.realpath(__file__))) , help='location of MLTreeMap resources (default: directory of mltreemap-improved.py)') parser.add_option('-T', '--num_threads', dest='num_threads', default = None , help='specifies the number of CPU threads to use in raxml and blast (default: 1)') parser.add_option('-d', '--delete', dest='delete', default = None, help='the sections of files to be deleted, as separated by colons (1 = Sequence Files; 2 = BLAST Results; 3 = Genewise Results; 4 = hmmalign and Gblocks Results; 5 = Unparsed RAxML Results') parser.add_option( '--overwrite', dest='overwrite', action='store_true', default = False, help='overwrites previously processed output folders') parser.add_option( '-v', '--verbose', dest='verbose', action='store_true', default = False, help='maintains the various_outputs and final_RaXML_outputs directories containing intermediate files') parser.add_option( '--showmessages', dest='showmessages', action='store_true', default = False, help='shows the detailed messages') parser.add_option( '--mltreemap_data', dest='mltreemap_data', default = '', help='The folder where the mltreemap data') return parser def checkParserArguments(options): """Returns 'args', a summary of MLTreeMap settings.""" # Ensure files contain more than 0 sequences if options.filelength <= 0: sys.exit('Input files require a positive number of sequences!') # Set the reference data file prefix and the reference tree name if options.reftree == 'g': setattr(options, 'reference_data_prefix', 'geba_') setattr(options,'reference_tree', 'geba.tree') elif options.reftree == 'i': setattr(options,'reference_data_prefix','fungi_') setattr(options, 'reference_tree', 'fungitr_tree.txt') else: setattr(options, 'reference_data_prefix', '') setattr(options, 'reference_tree','MLTreeMap_reference.tree') return options def removePreviousOutput(args): """ Prompts the user to determine how to deal with a pre-existing output directory. Returns an updated version of 'args', a summary of MLTreeMap settings. """ # Add (or replace a trailing (back)slash with) the PATHDELIM to the end of the output directory while re.search(r'/\Z', args.output) or re.search(r'\\\Z', args.output): args.output = args.output[:-1] args.output += PATHDELIM # delete previous output folders by force if args.overwrite: if path.exists(args.output): shutil.rmtree(args.output) # Prompt the user to deal with the pre-existing output directory while os.path.isdir(args.output): print('WARNING: Your output directory "' + args.output + '" already exists!') print('Overwrite [1], quit [2], or change directory [3]?') answer = raw_input() answer = int(answer) while not answer == 1 and not answer == 2 and not answer == 3: answer = raw_input('Invalid input. Please choose 1, 2, or 3.\n') answer = int(answer) if answer == 1: print('Do you really want to overwrite the old output directory?') print('All data in it will be lost!') answer2 = raw_input('Yes [y] or no [n]?\n') while not answer2 == 'y' and not answer2 == 'n': answer2 = raw_input('Invalid input. 
Please choose y or n.\n') if answer2 == 'y': shutil.rmtree(args.output) else: sys.exit('Exit MLTreeMap\n') elif answer == 2: sys.exit('Exit MLTreeMap\n') else: args.output = raw_input('Please enter the path to the new directory.\n') # Create the output directories args.output_dir_var = args.output + 'various_outputs' + PATHDELIM args.output_dir_raxml = args.output + 'final_RAxML_outputs' + PATHDELIM args.output_dir_final = args.output + 'final_outputs' + PATHDELIM os.makedirs(args.output) os.mkdir(args.output_dir_var) os.mkdir(args.output_dir_raxml) os.mkdir(args.output_dir_final) return args def createCogList(args): """Returns an Autovivification of the COGs in the MLTreeMap COG list file, and a list of output text precursors based on the analysis type.""" cog_list = Autovivify() text_of_analysis_type = Autovivify() alignment_set = args.reftree kind_of_cog = '' # For each line in the COG list file... cogInputList = open( args.mltreemap_data + PATHDELIM + \ 'data' + PATHDELIM + 'tree_data' + PATHDELIM + 'cog_list.txt', 'r') cogList = [ x.strip() for x in cogInputList.readlines() ] for cogInput in cogList: # Get the kind of COG if cogInput is a header line if (re.match(r'\A#(.*)', cogInput)): kind_of_cog = re.match(r'\A#(.*)', cogInput).group(1) continue # Add data to COG list based on the kind of COG it is if (kind_of_cog == 'phylogenetic_cogs'): cog_list[kind_of_cog][cogInput] = alignment_set cog_list['all_cogs'][cogInput] = alignment_set text_inset = '' if (alignment_set == 'g'): text_inset = ' based on the GEBA reference' if (alignment_set == 'i'): text_inset = ' focusing only on fungi' text_of_analysis_type[alignment_set] = 'Phylogenetic analysis' + text_inset + ':' elif (kind_of_cog == 'phylogenetic_rRNA_cogs'): cog, denominator, text = cogInput.split('\t') cog_list[kind_of_cog][cog] = denominator cog_list['all_cogs'][cog] = denominator text_of_analysis_type[denominator] = 'Phylogenetic analysis, ' + text + ':' elif (kind_of_cog == 'functional_cogs'): cog, denominator, text = cogInput.split('\t') cog_list[kind_of_cog][cog] = denominator cog_list['all_cogs'][cog] = denominator text_of_analysis_type[denominator] = 'Functional analysis, ' + text + ':' # Close the COG list file cogInputList.close() return (cog_list, text_of_analysis_type) def calculate_overlap(info): """Returns the overlap length of the base and the check sequences.""" overlap = 0 base_start = info['base']['start'] base_end = info['base']['end'] check_start = info['check']['start'] check_end = info['check']['end'] # Calculate the overlap based on the relative positioning of the base and check sequences if base_start <= check_start and check_start <= base_end and base_end <= check_end: # Base ---- # Check ------- overlap = base_end - check_start elif base_start <= check_start and check_end <= base_end: # Base -------- # Check -- overlap = check_end - check_start elif check_start <= base_start and base_start <= check_end and check_end <= base_end: # Base ----- # Check ----- overlap = check_end - base_start elif check_start <= base_start and base_end <= check_end: # Base -- # Check -------- overlap = base_end - base_start return overlap def splitFastaInput(args): """ Splits the input file into multiple files, each containing a maximum number of sequences as specified by the user. Ensures each sequence and sequence name is valid. Returns a list of the files produced from the input file. 
""" # Confirm input file is a fasta file input = open(args.input, 'r') if (not input.read(1) == '>'): sys.exit('ERROR: Your file does not appear to be a proper FASTA file!\n') # Unread the '>' to prevent problems later input.seek(-1,1) # Determine the output file names and open the output files if (re.match(r'\A.*\/(.*)', args.input)): inputFileName = re.match(r'\A.*\/(.*)', args.input).group(1) else: inputFileName = args.input outputSplit = open(args.output_dir_var + inputFileName + '_0.txt', 'w') outputFormatted = open(args.output_dir_var + inputFileName + '_formatted.txt', 'w') args.formatted_input_file = args.output_dir_var + inputFileName + '_formatted.txt' countFiles = 0 countSequences = 0 # Iterate through the input file... countTotal = 0 countNucleotides = 0 countXN = 0 countUndef = 0 splitFiles = [] for line in input: # If the line is a sequence name... if (re.search('\A>', line)): countSequences += 1 # Replace all non a-z, A-Z, 0-9, or . characters with a _ # Then replace the initial _ with a > line = re.sub(r'[^a-zA-Z0-9.\r\n]', '_', line) line = re.sub(r'\A_', '>', line) # Because RAxML can only work with file names having length <= 125, # Ensure that the sequence name length is <= 100 if (line.__len__() > 100): line = line[0:100] # Split the file if countSequences > the number of sequences per file specified by the user if (countSequences >= args.filelength): countSequences = 0 splitFiles.append(args.output_dir_var + inputFileName + '_%d.txt' %(countFiles)) countFiles += 1 outputSplit.close() outputSplit = open(args.output_dir_var + inputFileName + '_%d.txt' %(countFiles), 'w') # Else, if the line is a sequence... else: # Remove all non-characters from the sequence re.sub(r'[^a-zA-Z]','', line) # Count the number of {atcg} and {xn} in all the sequences characters = [] characters = list(line) for character in characters: countTotal += 1 # If input is nucleotides, count nucleotides if args.reftype == 'n': if (re.match(r'[acgtACGT]', character)): countNucleotides += 1 elif (re.match(r'[xnXN]', character)): countXN += 1 else: countUndef += 1 # Else, if input is amino acids, count amino acids elif args.reftype == 'a': if re.match(r'[abcdefghiklmnpqrstuvwyzABCDEFGHIKLMNPQRSTUVWYZ*]', character): countNucleotides += 1 elif re.match(r'[xnXN]', character): countXN += 1 else: countUndef += 1 # Write the lines to the appropriate files outputSplit.write(line) outputFormatted.write(line) # Close the files input.close() outputSplit.close() outputFormatted.close() # If there's only one input file, add it to the list of split input files if not splitFiles: splitFiles.append(args.output_dir_var + inputFileName + '_%d.txt' %(countFiles)) # Exit the program if character count is 0 if (countTotal == 0): sys.exit('ERROR: Your input file appears to be corrupted. No sequences were found!\n') # Exit the program if all sequences are composed only of X or N elif (countXN == countTotal): sys.exit('ERROR: Your sequence(s) contain only X or N!\n') # Exit the program if less than half of the characters are nucleotides elif (float(countNucleotides / float(countTotal)) < 0.5): if args.reftype == 'n': sys.exit('ERROR: Your sequence(s) most likely contain no DNA!\n') elif args.reftype == 'a': sys.exit('ERROR: Your sequence(s) most likely contain no AA!\n') return splitFiles def runBlast(args, splitFiles): """Runs the BLAST algorithm on each of the split input files.""" print '' print 'Run BLAST' # For each file containing a maximum of the specified number of sequences... 
alignment_data_dir = args.mltreemap_data + PATHDELIM + \ 'data' + PATHDELIM + \ args.reference_data_prefix + 'alignment_data' + PATHDELIM + \ '*.fa' db_nt = '-db "' db_aa = '-db "' for file in glob.glob(alignment_data_dir): if re.match(r'.*rRNA\.fa\Z', file): db_nt += file + ' ' else: db_aa += file + ' ' db_nt += '"' db_aa += '"' for splitFile in sorted(splitFiles): # Ensure splitFile is a .txt file; save file name if so, die otherwise blastInputFileName = '' if (not re.match(r'\A.+/(.+)\.txt\Z', splitFile)): sys.exit('ERROR: Something is wrong with the directory of the BLAST input file!\n') else: blastInputFileName = re.match(r'\A.+/(.+)\.txt\Z', splitFile).group(1) # Run the appropriate BLAST command(s) based on the input sequence type if args.reftype == 'n': command = args.executables + PATHDELIM + 'blastx ' + \ '-query ' + splitFile + ' ' + db_aa + ' ' + \ '-evalue 0.01 -max_target_seqs 20000 ' + \ '-dbsize 1000000 -outfmt 6 ' if args.num_threads: if (int(args.num_threads) >= 1) and (int(args.num_threads) < int(available_cpu_count())): command += '-num_threads ' + str(int(args.num_threads)) + ' ' else: command += '-num_threads ' + str(1) + ' ' command += '>> ' + args.output_dir_var + blastInputFileName + '.BLAST_results_raw.txt' os.system(command) command = args.executables + PATHDELIM + 'blastn ' + \ '-query ' + splitFile + ' ' + db_nt + ' ' + \ '-evalue 0.01 -max_target_seqs 20000 ' + \ '-dbsize 1000000 -outfmt 6 ' if args.num_threads: if (int(args.num_threads) >= 1) and (int(args.num_threads) < int(available_cpu_count())): command += '-num_threads ' + str(int(args.num_threads)) + ' ' else: command += '-num_threads ' + str(1) + ' ' command += '>> ' + args.output_dir_var + blastInputFileName + '.rRNA_BLAST_results_raw.txt' os.system(command) elif args.reftype == 'a': command = args.executables + PATHDELIM + 'blastp ' + \ '-query ' + splitFile + ' ' + db_aa + ' ' + \ '-evalue 0.01 -max_target_seqs 20000 ' + \ '-dbsize 1000000 -outfmt 6 ' if args.num_threads: if (int(args.num_threads) >= 1) and (int(args.num_threads) < int(available_cpu_count())): command += '-num_threads ' + str(int(args.num_threads)) + ' ' else: command += '-num_threads ' + str(1) + ' ' command += '>> ' + args.output_dir_var + blastInputFileName + '.BLAST_results_raw.txt' os.system(command) # Remove the BLAST input file if path.exists(splitFile): os.remove(splitFile) # TK? Remove empty BLAST result raw files; store non-empty files in a list def readBlastResults(args): """ Deletes empty BLAST results files. Returns a list of non-empty BLAST results files. """ rawBlastResultFiles = [] for file in glob.glob(args.output_dir_var + '*BLAST_results_raw.txt'): file.rstrip('\r\n') if path.getsize(file) <= 0: os.remove(file) else: rawBlastResultFiles.append(file) return rawBlastResultFiles def parseBlastResults(args, rawBlastResultFiles, cog_list): """Returns an Autovivification of purified (eg. 
non-redundant) BLAST hits.""" counter = 0 purifiedBlastHits = Autovivify() for file in sorted(rawBlastResultFiles): try: blastResults = open(file, 'r') except IOError: print "ERROR: Cannot open BLAST outputfile " + file continue contigs = Autovivify() identifier = 0 for line in blastResults: # Clear the variables referencing the contig, COG, query start, query end, reference start, reference end, and bitscore # Interpret the BLAST hit, and assign the details accordingly tempContig, tempDetailedCOG, _, _, _, _, tempQStart, tempQEnd, tempRStart, tempREnd, _, tempBitScore = line.split('\t') tempREnd = int(tempREnd) tempRStart = int(tempRStart) tempQEnd = int(tempQEnd) tempQStart = int(tempQStart) tempBitScore = float(tempBitScore) # Skip to next BLAST hit if bit score is less than user-defined minimum if (tempBitScore <= args.bitscore): continue # Determine the direction of the hit relative to the reference direction = 'forward' if tempRStart > tempREnd: temp = tempRStart tempRStart = tempREnd tempREnd = temp direction = 'reverse' if tempQStart > tempQEnd: temp = tempQStart tempQStart = tempQEnd tempQEnd = temp if (direction == 'reverse'): sys.exit('ERROR: Parsing error with the BLAST results. Please notify the authors about ' + tempContig + ' at ' +\ tempDetailedCOG + 'q('+tempQEnd+'..'+tempQStart+'),r('+tempREnd+'..'+tempRStart+')') direction = 'reverse' # Trim COG name to last 7 characters of detailed COG name # TK - This will be important to note in the user's manual, especially if we enable people to add their own COGs later if re.match(r'.*(.{7})\Z', tempDetailedCOG): tempCOG = re.match(r'.*(.{7})\Z', tempDetailedCOG).group(1) else: sys.exit('ERROR: Could not detect the COG of sequence ' + tempDetailedCOG) # Save contig details to the list contigs[tempContig][identifier]['bitscore'] = tempBitScore contigs[tempContig][identifier]['cog'] = tempCOG contigs[tempContig][identifier]['seq_start'] = tempQStart contigs[tempContig][identifier]['seq_end'] = tempQEnd contigs[tempContig][identifier]['direction'] = direction contigs[tempContig][identifier]['validity'] = True identifier += 1 # Close the file blastResults.close() # Purify the BLAST hits # For each contig sorted by their stringwise comparison... for contig in sorted(contigs.keys()): identifier = 0 # For each blast result for that contig... for base_blast_result_raw_identifier in sorted(contigs[contig].keys()): base_bitscore = contigs[contig][base_blast_result_raw_identifier]['bitscore'] base_cog = contigs[contig][base_blast_result_raw_identifier]['cog'] base_start = contigs[contig][base_blast_result_raw_identifier]['seq_start'] base_end = contigs[contig][base_blast_result_raw_identifier]['seq_end'] direction = contigs[contig][base_blast_result_raw_identifier]['direction'] base_length = base_end - base_start # Skip if base_bitscore is less than user specified minimum bitscore if (base_bitscore < args.bitscore): continue # Set validity to 0 if COG is not in list of MLTreeMap COGs if not base_cog in cog_list['all_cogs']: contigs[contig][base_blast_result_raw_identifier]['validity'] = False # Compare the BLAST hit (base) against all others # There may be several opinions about how to do this. This way is based on the original MLTreeMap # ----A---- --C-- # ---B--- # A kills B, B kills C. 
(Another approach would be to let C live, but the original MLTreeMap authors don't expect C to be useful) for check_blast_result_raw_identifier in sorted(contigs[contig]): check_bitscore = contigs[contig][check_blast_result_raw_identifier]['bitscore'] check_cog = contigs[contig][check_blast_result_raw_identifier]['cog'] check_start = contigs[contig][check_blast_result_raw_identifier]['seq_start'] check_end = contigs[contig][check_blast_result_raw_identifier]['seq_end'] check_length = check_end - check_start # Don't compare base hit against itself; skip to next iteration if base_blast_result_raw_identifier == check_blast_result_raw_identifier: continue # Compare the base and check BLAST hits info = Autovivify() info['base']['start'] = base_start info['base']['end'] = base_end info['check']['start'] = check_start info['check']['end'] = check_end overlap = calculate_overlap(info) counter +=1 # Check for validity for hits with overlap if overlap > 0: if overlap > 0.5*base_length and base_bitscore < check_bitscore: contigs[contig][base_blast_result_raw_identifier]['validity'] = False elif overlap > 0.5*check_length and check_bitscore < base_bitscore: contigs[contig][check_blast_result_raw_identifier]['validity'] = False elif base_start == check_start and base_end == check_end: # If both are the same, keep only the one with the smaller identifier if check_blast_result_raw_identifier > base_blast_result_raw_identifier: contigs[contig][check_blast_result_raw_identifier]['validity'] = False else: contigs[contig][base_blast_result_raw_identifier]['validity'] = False # Save purified hits for valid base hits if contigs[contig][base_blast_result_raw_identifier]['validity']: purifiedBlastHits[contig][identifier]['bitscore'] = base_bitscore purifiedBlastHits[contig][identifier]['cog'] = base_cog purifiedBlastHits[contig][identifier]['start'] = base_start purifiedBlastHits[contig][identifier]['end'] = base_end purifiedBlastHits[contig][identifier]['direction'] = direction purifiedBlastHits[contig][identifier]['is_already_placed'] = False identifier += 1 # Print the BLAST results for each contig for contig in sorted(purifiedBlastHits.keys()): outfile = args.output_dir_var + contig + '_blast_result_purified.txt' out = open(outfile, 'w') sorting_hash = {} # Identify the first instance of each bitscore for identifier in sorted(purifiedBlastHits[contig].keys()): if not purifiedBlastHits[contig][identifier]['bitscore'] in sorting_hash: sorting_hash[purifiedBlastHits[contig][identifier]['bitscore']] = {} sorting_hash[purifiedBlastHits[contig][identifier]['bitscore']][identifier] = 1 # Print the (potentially reduced set of) BLAST results ordered by decreasing bitscore for bitscore in sorted(sorting_hash.keys(), reverse=True): for identifier in sorted(sorting_hash[bitscore]): out.write(contig + '\t' + str(purifiedBlastHits[contig][identifier]['start']) + '\t' +\ str(purifiedBlastHits[contig][identifier]['end']) + '\t' +\ str(purifiedBlastHits[contig][identifier]['direction']) + '\t' +\ purifiedBlastHits[contig][identifier]['cog'] + '\t' + str( bitscore) + '\n') out.close() return purifiedBlastHits def blastpParser(args, shortened_sequence_files, blast_hits_purified): """ For each contig, produces a file similar to the Genewise output file (this is in cases where Genewise is unnecessary because it is already an AA sequence. Returns an Autovivification of the output file for each contig. 
""" blastpSummaryFiles = Autovivify() for contig in sorted(blast_hits_purified.keys()): output_file = args.output_dir_var + contig + '_blast_result_summary.txt' try: output = open(output_file, 'w') except IOError: sys.exit('ERROR: Unable to open ' + output_file + '!\n') blastpSummaryFiles[contig][output_file] = 1 shortened_sequence_file = args.output_dir_var + contig + '_sequence_shortened.txt' try: sequence_file = open(shortened_sequence_file, 'r') except IOError: sys.exit('ERROR: Could not open ' + shortened_sequence_file + '!\n') flagSeq = 0 sequence = '' # Get the sequence from the shortened sequence file for line in sequence_file: if re.search('\A>', line): if flagSeq == 1: sys.exit('ERROR: Unexpected multiple shortened sequences found!\n') flagSeq = 1 continue else: line.strip() sequence += line # Write the output file to imitate the Genewise results for count in sorted(blast_hits_purified[contig].keys()): output.write(str(blast_hits_purified[contig][count]['cog']) + '\t') output.write(str(blast_hits_purified[contig][count]['start']) + '\t') output.write(str(blast_hits_purified[contig][count]['end']) + '\t') output.write(str(blast_hits_purified[contig][count]['direction']) + '\t') output.write(str(sequence) + '\n') output.close() return blastpSummaryFiles def produceGenewiseFiles(args, blast_hits_purified): """ Takes an Autovivification of purified BLAST hits and uses these to produce the input files needed for Genewise. Returns an Autovivification mapping the contig to its sequence's start and end positions for Genewise. Returns a list of files to be run through Genewise. """ flanking_length = 1000 # Recommended: 1000 prae_contig_coordinates = Autovivify() contig_coordinates = Autovivify() shortened_sequence_files = {} for contig in sorted(blast_hits_purified.keys()): for base_identifier in sorted(blast_hits_purified[contig].keys()): # Skip rRNA hits for now (we work with them later) if re.search("rRNA", blast_hits_purified[contig][base_identifier]['cog']): continue # Skip hits which have already been placed; otherwise, mark them as placed if blast_hits_purified[contig][base_identifier]['is_already_placed']: continue blast_hits_purified[contig][base_identifier]['is_already_placed'] = True base_start = blast_hits_purified[contig][base_identifier]['start'] - flanking_length base_end = blast_hits_purified[contig][base_identifier]['end'] + flanking_length nr_of_blast_hits = len(blast_hits_purified[contig].keys()) check_identifier =0 while check_identifier < nr_of_blast_hits: # Skip rRNA hits for now (we work with them later) if re.search(r'rRNA', blast_hits_purified[contig][check_identifier]['cog']): check_identifier +=1 continue # Skip hits which have already been placed; otherwise, mark them as placed if blast_hits_purified[contig][check_identifier]['is_already_placed']: check_identifier +=1 continue check_start = blast_hits_purified[contig][check_identifier]['start'] - flanking_length check_end = blast_hits_purified[contig][check_identifier]['end'] + flanking_length # Check for overlap if base_start <= check_start and check_start <= base_end and base_end <= check_end: # Base -------- # Check -------- base_end = check_end blast_hits_purified[contig][check_identifier]['is_already_placed'] = True check_identifier = 0 continue elif base_start <= check_start and check_end <= base_end: # Base -------- # Check ---- blast_hits_purified[contig][check_identifier]['is_already_placed'] = True check_identifier = 0 continue elif check_start <= base_start and base_start <= check_end and check_end 
<= base_end: # Base -------- # Check -------- base_start = check_start blast_hits_purified[contig][check_identifier]['is_already_placed'] = True check_identifier = 0 continue elif check_start <= base_start and base_end <= check_end: # Base ---- # Check -------- base_start = check_start base_end = check_end blast_hits_purified[contig][check_identifier]['is_already_placed'] = True check_identifier = 0 continue check_identifier += 1 prae_contig_coordinates[contig][base_start][base_end] = 1 # Produce the input files for Genewise input = open(args.formatted_input_file, 'r') contig_name = '' sequence = '' line = 'x' while line: line= input.readline() line = line.strip() line = re.sub(r'\s', '_', line) searchmatch =re.search(r'\A>(.+)', line) if searchmatch or not line: if not line: sequence += line if contig_name in prae_contig_coordinates: sequence_length = len(sequence) shortened_sequence="" # Start searching for the information to shorten the file. for start_B in sorted(prae_contig_coordinates[contig_name].keys()) : for end_B in sorted(prae_contig_coordinates[contig_name][start_B].keys()) : # Ok, now we have all information about the hit. Correct start and end if needed: if start_B < 0: start_B = 0 if end_B >= sequence_length: end_B = sequence_length -1 # Note: Genewise (GW) positions start with 1, Blast (B) positions with 0 -> thus differenciate between start_B and start_GW shortened_start_GW = len(shortened_sequence) + 1 count = -1 for nucleotide in sequence: count += 1 if not (count >= start_B and count <= end_B): continue shortened_sequence += nucleotide shortened_end_GW = len(shortened_sequence) addition_factor = (start_B + 1) - shortened_start_GW #$start_B + 1 == $start_GW contig_coordinates[contig_name][shortened_start_GW][shortened_end_GW] = addition_factor try: with open(args.output_dir_var + contig_name + "_sequence.txt", 'w') as f: fprintf(f, "%s\n", ">"+ contig_name + "\n" + sequence) f.close() except: print "ERROR: Can't create " + args.output_dir_var + contig_name + "_sequence.txt!" try: with open(args.output_dir_var + contig_name + "_sequence_shortened.txt", 'w') as f: fprintf(f, "%s\n",">" + contig_name + "\n" + shortened_sequence) f.close() shortened_sequence_files[args.output_dir_var + contig_name + "_sequence_shortened.txt"]=contig_name except: print "ERROR: Can't create " + args.output_dir_var + contig_name +"_sequence_shortened.txt!" if searchmatch: contig_name = searchmatch.group(1) sequence = "" else: sequence += line input.close() return contig_coordinates, shortened_sequence_files def fprintf(file, fmt, *args): """A helper function used to print to a specified file.""" file.write(fmt % args) def startGenewise(args, shortened_sequence_files, blast_hits_purified): """ Runs Genewise on the provided list of sequence files. (The provided Autovivification of purified BLAST hits is used for file naming purposes). Returns an Autovivification mapping the Genewise output files to each contig. """ print 'Run Genewise' genewise_outputfiles = Autovivify() # For each file which has been shortened by produceGenewiseFiles... 
for shortened_sequence_file in sorted(shortened_sequence_files.keys()): contig = shortened_sequence_files[shortened_sequence_file] # For each identifier associated with this contig in the output of parseBlastResults for identifier in sorted(blast_hits_purified[contig].keys()): cog = blast_hits_purified[contig][identifier]['cog'] # Prepare the output file name, and store it genewise_outputfile = args.output_dir_var + contig + '_' + cog + '_genewise.txt' genewise_outputfiles[contig][genewise_outputfile] = 1 # Prepare the Genewise command and run it mltreemap_dir = args.mltreemap_data + PATHDELIM + 'data' + PATHDELIM genewise_support = mltreemap_dir + PATHDELIM + 'genewise_support_files' + PATHDELIM hmm_dir = mltreemap_dir + "hmm_data" + PATHDELIM genewise_command = args.executables + PATHDELIM + 'genewise ' + \ hmm_dir + cog + '.hmm ' + \ shortened_sequence_file + ' -init local -quiet -gene ' + \ genewise_support + 'human.gf -matrix ' + \ genewise_support + 'blosum62.bla -codon ' + \ genewise_support + 'codon.table -hmmer -subs' + \ ' 0.01 -indel 0.01 -gap 11 -ext 1 -both -pep -sum > ' + genewise_outputfile os.system(genewise_command) # Return the list of output files for each contig return genewise_outputfiles def parse_genewise_results(args, genewise_outputfiles, contig_coordinates): """ Uses the provided Autovivification of Genewise output files and the provided Autovivification mapping the contig to its Genewise sequence's start and end points to produce files summarizing the purified Genewise results. Returns an Autovivification mapping the summary files to each contig. """ genewise_summary_files = Autovivify() # For each contig analyzed by Genewise... for contig in sorted(genewise_outputfiles.keys()): genewise_results_raw = Autovivify() genewise_results = Autovivify() at_least_one_hit = 0 count = 0 # Parse each output file of that contig for genewise_outputfile in sorted(genewise_outputfiles[contig].keys()): try: input = open(genewise_outputfile, 'r') except IOError: print "ERROR: Cannot open Genewise outputfile " + genewise_outputfile continue header_count = 0 sequence_count = -1 for line in input: line.strip() # If the line starts with a digit, parse it if re.match(r'\A\d', line): # Split the results based on one or more spaces between the desired data bitscore, query, _, _, _, start, end, _, _ = re.split(' +', line) bitscore = float(bitscore) start = int(start) end = int(end) # If there is at least one query, take note for future use if query is not None: at_least_one_hit = 1 # Determine the direction of the predicted amino acid sequence direction = 'forward' if start > end: temp = start start = end end = start direction = 'reverse' # Correct the positions # Genewise is run on a shortened sequence, so the true positions must be calculated for coords_start in sorted(contig_coordinates[contig].keys()): if start >= coords_start: for coords_end in sorted(contig_coordinates[contig][coords_start].keys()): if end <= coords_end: addition_factor = contig_coordinates[contig][coords_start][coords_end] start += addition_factor end += addition_factor break genewise_results_raw[contig][genewise_outputfile][header_count]['start'] = start genewise_results_raw[contig][genewise_outputfile][header_count]['end'] = end genewise_results_raw[contig][genewise_outputfile][header_count]['cog'] = query genewise_results_raw[contig][genewise_outputfile][header_count]['bitscore'] = bitscore genewise_results_raw[contig][genewise_outputfile][header_count]['direction'] = direction header_count += 1 # Otherwise, if 
the line starts with a '>', prepare to intake the sequence elif re.match(r'\A>', line): sequence_count += 1 genewise_results_raw[contig][genewise_outputfile][sequence_count]['sequence'] = '' # If the line begins with any word character, and contains neither 'Bits' (a title line) # nor 'Making' (a Genewise comment about the treatment of introns) elif re.match(r'\A\w', line) and not re.match(r'\ABits', line) and not re.match(r'\AMaking', line): genewise_results_raw[contig][genewise_outputfile][sequence_count]['sequence'] += line.strip() input.close() # Skip to next contig if there isn't at least 1 hit if at_least_one_hit != 1: continue # Purify the parsed results # For each genewise_outputfile for the contig... for base_genewise_outputfile in sorted(genewise_results_raw[contig].keys()): # For each count of the genewise_outputfile... for base_count in sorted(genewise_results_raw[contig][base_genewise_outputfile].keys()): base_start = genewise_results_raw[contig][base_genewise_outputfile][base_count]['start'] base_end = genewise_results_raw[contig][base_genewise_outputfile][base_count]['end'] base_cog = genewise_results_raw[contig][base_genewise_outputfile][base_count]['cog'] base_bitscore = genewise_results_raw[contig][base_genewise_outputfile][base_count]['bitscore'] base_direction = genewise_results_raw[contig][base_genewise_outputfile][base_count]['direction'] base_sequence = genewise_results_raw[contig][base_genewise_outputfile][base_count]['sequence'] # Ensure that the base_cog, base_start, and base_end are defined if base_cog is None or base_start is None or base_end is None: error_string = 'ERROR: The file "' + base_genewise_outputfile + '" cannot be parsed!\n' +\ 'Please contact the authors about it. As a quick solution to the problem, ' +\ 'try to remove the sequence which produced this hit from your input file.\n' sys.exit(error_string) base_length = base_end - base_start is_valid = 1 # Check against all other genewise_outputfiles for that contig # For each genewise_outputfile for the contig... for check_genewise_outputfile in sorted(genewise_results_raw[contig].keys()): # For each count of the genewise_outputfile... for check_count in sorted(genewise_results_raw[contig][check_genewise_outputfile].keys()): # Skip to next iteration if comparing the base to itself if base_count == check_count: continue check_start = genewise_results_raw[contig][check_genewise_outputfile][check_count]['start'] check_end = genewise_results_raw[contig][check_genewise_outputfile][check_count]['end'] check_cog = genewise_results_raw[contig][check_genewise_outputfile][check_count]['cog'] # Ensure that the check_cog, check_start, and check_end are defined if check_cog is None or check_start is None or check_end is None: error_string = 'ERROR: The file "' + check_genewise_outputfile + '" cannot be parsed!\n' +\ 'Please contact the authors about it. As a quick solution to the problem, ' +\ 'try to remove the sequence which produced this hit from your input file.\n' sys.exit(error_string) check_length = check_end - check_start info = Autovivify() info['base']['start'] = base_start info['base']['end'] = base_end info['check']['start'] = check_start info['check']['end'] = check_end overlap = calculate_overlap(info) # Purify the results # If the size of the overlap is more than half the size of the hit... if float(overlap) / float(base_length) > 0.5: # And if the hit and check are the same COG... 
if base_cog == check_cog: # Keep only the longer hit, since the major difference between the hits is the length if base_length < check_length: is_valid = 0 # The COGs are different, so only skip the hit if it is less than half the length of the check elif base_length < check_length / 2: is_valid = 0 # But if the overlap is not more than half the size of the hit, and the hit remains valid... if is_valid and base_cog == check_cog: # Invalidate the hit if it is a side hit of the same COG if base_length < check_length * 0.7: is_valid = 0 # If the hit is valid, save it if is_valid == 1: genewise_results[contig][count]['start'] = base_start genewise_results[contig][count]['end'] = base_end genewise_results[contig][count]['cog'] = base_cog genewise_results[contig][count]['direction'] = base_direction genewise_results[contig][count]['sequence'] = base_sequence count += 1 # Skip to next hit if there are no valid hits if count <= 0: continue # Write the summary file genewise_summary_file = args.output_dir_var + contig + '_genewise_result_summary.txt' try: output = open(genewise_summary_file, 'w') except IOError: print 'ERROR: Cannot open Genewise summary file ' + genewise_summary_file + ' for writing' sys.exit(0) genewise_summary_files[contig][genewise_summary_file] = 1 for count in sorted(genewise_results[contig].keys()): output.write(genewise_results[contig][count]['cog'] + '\t' +\ str(genewise_results[contig][count]['start']) + '\t' +\ str(genewise_results[contig][count]['end']) + '\t' +\ genewise_results[contig][count]['direction'] + '\t' +\ genewise_results[contig][count]['sequence'] + '\n') output.close() return genewise_summary_files def get_rRNA_hit_sequences(args, blast_hits_purified, cog_list, genewise_summary_files): """ rRNA does not get translated into protein. Regardless, we want to take the rRNA and summarize it in a way that is parallel to the Genewise summary files. This function does that using the provided Autovivification of purified BLAST hits, list of COGs, and Autovivification of Genewise summary files. Returns an Autovivification summarizing the coordinates for each rRNA hit. Returns a list of the rRNA summary files. """ # TK: ...the list of rRNA hit files is empty. contig_rRNA_coordinates = Autovivify() rRNA_hit_files = {} for contig in sorted(blast_hits_purified.keys()) : # note: We skipped the Genewise step (we are dealing with rRNA) but we bring the rRNA files in the # same structure as the Genewise summary files and bring them back into the ordinary pipeline. 
for identifier in sorted(blast_hits_purified[contig].keys()): if not re.search("rRNA", blast_hits_purified[contig][identifier]['cog']): continue start = blast_hits_purified[contig][identifier]["start"] end = blast_hits_purified[contig][identifier]["end"] cog = blast_hits_purified[contig][identifier]["cog"] direction = blast_hits_purified[contig][identifier]["direction"] contig_rRNA_coordinates[contig][identifier]["start"] = start contig_rRNA_coordinates[contig][identifier]["end"] = end contig_rRNA_coordinates[contig][identifier]["cog"] = cog contig_rRNA_coordinates[contig][identifier]["direction"] = direction outfile_name = args.output_dir_var + contig + '_rRNA_result_summary.txt' contig_rRNA_coordinates[contig][identifier]["outfile"] = outfile_name genewise_summary_files[contig][outfile_name] = 1 try: outfile = open(outfile_name, 'w') outfile.close() except IOError, e: print "ERROR: Can't create " + outfile_name + '!\n' sys.exit(0) try: input = open(args.input, 'r') except IOError, e: print "ERROR: Can't create " + args.input +'!\n' sys.exit(0) contig_name = '' sequence = '' line = 'x' while line: line= input.readline() line = line.strip() line = re.sub(r'\s', '_', line) searchmatch =re.search(r'\A>(.+)', line) if searchmatch or not line: if not line: sequence += line if contig_name in contig_rRNA_coordinates: sequence_length = len(sequence) shortened_sequence="" #start searching for the information to shorten the file. for identifier in sorted(contig_rRNA_coordinates[contig_name].keys()) : start = contig_rRNA_coordinates[contig_name][identifier]["start"] end = contig_rRNA_coordinates[contig_name][identifier]["end"] cog = contig_rRNA_coordinates[contig_name][identifier]["cog"] direction = contig_rRNA_coordinates[contig_name][identifier]["direction"] outfile = contig_rRNA_coordinates[contig_name][identifier]['outfile'] denominator = cog_list['all_cogs'][cog] count = -1 shortened_sequence = "" for nucleotide in sequence: count+=1 if not (count >= start and count <= end): continue shortened_sequence += nucleotide if direction == 'reverse': #ok, our hit has been on the opposite strand of the reference. #to get a proper alignment, we thus have to produce a negative strand version of the input nucleotides2 = ''.join(reversed(shortened_sequence)) shortened_sequence = "" nucleotides2 = nucleotides2.lower() for nucleotide in nucleotides2: if nucleotide == 't': nucleotide = 'a' elif nucleotide == 'a' : nucleotide = 't' elif nucleotide == 'c': nucleotide = 'g' elif nucleotide == 'g': nucleotide = 'c' shortened_sequence += nucleotide try: out = open(outfile, 'a') fprintf(out,'%s\t%s\t%s\t%s\t%s\n', cog, start, end,'n/a', shortened_sequence) out.close() except IOError, e: print "ERROR: Can't create " + outfile + '!\n' sys.exit(0) try: output_file = open(args.output_dir_var + contig_name + '_sequence.txt', 'w') fprintf(output_file, '>%s\n%s',contig_name, sequence) output_file.close() except IOError, e: print "ERROR: Can't create " + args.output_dir_var + contig_name + '_sequence.txt!\n' sys.exit(0) if searchmatch: contig_name = searchmatch.group(1) sequence = "" else: sequence += line input.close() return contig_rRNA_coordinates, rRNA_hit_files def prepare_and_run_hmmalign(args, genewise_summary_files, cog_list): """ Runs hmmalign using the provided COG list and summary of Genewise files. Returns an Autovivification of the resulting files from hmmalign. 
""" reference_data_prefix = args.reference_data_prefix hmmalign_singlehit_files = Autovivify(); print 'Run hmmalign' # Run hmmalign on each Genewise summary file for contig in sorted(genewise_summary_files.keys()): for genewise_summary_file in sorted(genewise_summary_files[contig].keys()): try: input = open(genewise_summary_file, 'r') except IOError: print "ERROR: Can't open " + genewise_summary_file + "!\n" sys.exit(0) line = input.readline() line = line.strip() while line: cog, start, end, _, sequence = line.split('\t') denominator = cog_list["all_cogs"][cog] f_contig = denominator + "_" + contig genewise_singlehit_file = args.output_dir_var +PATHDELIM +f_contig+'_'+cog+"_"+str(start)+"_"+str(end) hmmalign_singlehit_files[f_contig][genewise_singlehit_file + ".mfa"] = True genewise_singlehit_file_fa = genewise_singlehit_file + ".fa" try: outfile = open(genewise_singlehit_file_fa, 'w') fprintf(outfile, '>query\n%s', sequence) outfile.close() except IOError: print 'Can\'t create ' + genewise_singlehit_file_fa +'\n' sys.exit(0) mltreemap_resources = args.mltreemap_data + PATHDELIM + 'data' + PATHDELIM hmmalign_command = [ args.executables + PATHDELIM + 'hmmalign', '-m', '--mapali',\ mltreemap_resources + reference_data_prefix + 'alignment_data' + PATHDELIM + cog + '.fa',\ '--outformat', 'Clustal',\ mltreemap_resources + reference_data_prefix + 'hmm_data' + PATHDELIM + cog + '.hmm',\ genewise_singlehit_file_fa, '>', genewise_singlehit_file + '.mfa' ] os.system(' '.join(hmmalign_command)) line = input.readline() line = line.strip() input.close() return hmmalign_singlehit_files def get_non_wag_cogs(args): """ Returns an Autovivification listing the COGs which don't follow the WAG evolutionary model. """ non_wag_cog_list = Autovivify() try: non_wag_cogs_file = args.mltreemap_data + PATHDELIM + \ 'data' + PATHDELIM + 'tree_data' + PATHDELIM +'non_wag_cogs.txt' cogin = open(non_wag_cogs_file, 'r') except IOError: sys.exit('ERROR: Can\'t open ' + non_wag_cogs_file + '!\n') for line in cogin: line = line.strip() if re.search(r'\A#(.+)', line): denominator = re.search(r'\A#(.+)', line).group(1) else: cog, model = line.split('\t') non_wag_cog_list[denominator][cog] = model cogin.close() return non_wag_cog_list def concatenate_hmmalign_singlehits_files(args, hmmalign_singlehit_files, non_wag_cog_list): """ Concatenates the hmmalign files using the provided Autovivifications of hmmalign files and non-WAG COGs. Returns a list of the files containing the concatenated hmmalign results. Returns a list of the model used for each file. Returns a list of the number of sequences found in each file. """ # For each type of gene... concatenated_mfa_files = {} models_to_be_used = {} nrs_of_sequences = {} for f_contig in sorted(hmmalign_singlehit_files.keys()): # Determine what type of gene is currently represented, or die an error sequences = Autovivify() model_to_be_used = "" query_sequence = "" denominator = "" if re.search(r'\A(.)', f_contig): denominator = re.match(r'\A(.)', f_contig).group(1) else: sys.exit('ERROR: The analysis type could not be parsed from ' + f_contig + '!\n') # For each file... 
for hmmalign_singlehit_file in sorted(hmmalign_singlehit_files[f_contig].keys()): # Open the file try: input = open(hmmalign_singlehit_file, 'r') except IOError: sys.exit('Can\'t open ' + hmmalign_singlehit_file + '!\n') reached_data_part = False # Determine the best AA model if re.search(r'\A.+_(.{7})_\d+_\d+\.mfa\Z', hmmalign_singlehit_file): cog = re.search(r'\A.+_(.{7})_\d+_\d+\.mfa\Z', hmmalign_singlehit_file).group(1) else: sys.exit('ERROR: The COG could not be parsed from ' + hmmalign_singlehit_file + '!\n') if non_wag_cog_list[denominator][cog] and model_to_be_used != 'PROTGAMMAWAG': model_to_be_used = non_wag_cog_list[denominator][cog] else: model_to_be_used = 'PROTGAMMAWAG' # Get sequence from file for _line in input: line = _line.strip() if re.search(r'query', line): reached_data_part = True if not reached_data_part: continue searchResult = re.search(r'\A(.+) (\S+)\Z', line) if searchResult: name_long = searchResult.group(1) sequence_part = searchResult.group(2) sequence_name = '' if re.search(r'query', name_long): query_sequence += sequence_part elif re.search(r'(\d+)_', name_long): sequence_name = re.search(r'(\d+)_', name_long).group(1) if sequences[sequence_name]: sequences[sequence_name] += sequence_part else: sequences[sequence_name] = sequence_part input.close() models_to_be_used[f_contig] = model_to_be_used concatenated_mfa_files[f_contig] = args.output_dir_var + f_contig + '.mfa' # Write to the output file try: output = open(args.output_dir_var + f_contig + '.mfa', 'w') except IOError: sys.exit('ERROR: Can\'t create ' + args.output_dir_var + f_contig + '.mfa\n') output.write('>query\n' + query_sequence + '\n') nrs_of_sequences[f_contig] = 1 for sequence_name in sorted(sequences.keys()): nrs_of_sequences[f_contig] += 1 sequence = sequences[sequence_name] output.write('>' + sequence_name + '\n' + sequence + '\n') output.close() return (concatenated_mfa_files, nrs_of_sequences, models_to_be_used) def start_gblocks(args, concatenated_mfa_files, nrs_of_sequences): """ Runs Gblocks using the provided lists of the concatenated hmmalign files, and the number of sequences in each file. Returns a list of files resulting from Gblocks. """ gblocks_files = {} print 'Run Gblocks' for f_contig in sorted(concatenated_mfa_files.keys()) : concatenated_mfa_file = concatenated_mfa_files[f_contig] nr_of_sequences = nrs_of_sequences[f_contig] min_flank_pos = int(nr_of_sequences * 0.55) gblocks_file = concatenated_mfa_file+ "-gb" gblocks_files[f_contig] = gblocks_file; gblocks_command = [ args.executables + PATHDELIM + "Gblocks" ] gblocks_command.append(concatenated_mfa_file) gblocks_command += ['-t=p', '-s=y', '-u=n', '-p=t', '-b3=15',\ '-b4=3', '-b5=h', '-b2='+str(min_flank_pos),\ '>', '/dev/null'] os.system(' '.join(gblocks_command)) return gblocks_files def produce_phy_file(args, gblocks_files, nrs_of_sequences): """ Produces phy files from the provided list of Gblocks result files, and the number of sequences in each file. Returns an Autovivification containing the names of the produced phy files. 
""" phy_files = Autovivify() sequence_lengths = Autovivify() # Open each Gblocks result file for f_contig in sorted(gblocks_files.keys()): sequences_for_phy = Autovivify() do_not_continue = 0 sequences_raw = Autovivify() gblocks_file = gblocks_files[f_contig] try: input = open(gblocks_file, 'r') except IOError: sys.exit('ERROR: Can\'t open ' + gblocks_file + '!\n') for line in input: line = line.strip() seq_name_search = re.search(r'\A>(.+)', line) if seq_name_search: seq_name = seq_name_search.group(1) # Flag the user if the reference alignment contains the number -666, which is needed later in the code if seq_name == -666: sys.exit('ERROR: Your reference alignment contains element with the number -666. ' +\ 'Please change it, because this number is needed for internal purposes.\n') if seq_name == 'query': seq_name = -666 else: line = re.sub(r' ', '', line) if seq_name in sequences_raw: sequences_raw[seq_name] += line else: sequences_raw[seq_name] = line input.close() # Ensure the sequences contain only valid characters for RAxML for seq_name in sorted(sequences_raw.keys()): if do_not_continue == 1: continue sequence = sequences_raw[seq_name] count = 0 sequence_lengths[f_contig] = len(sequence) sequence = re.sub(r'\.', 'X', sequence) sequence = re.sub(r'\*', 'X', sequence) sequence = re.sub('-', 'X', sequence) if re.search(r'\AX+\Z', sequence): sequence = re.sub('X', 'V', sequence, 1) if seq_name == -666: seq_dummy = re.sub('X', '', sequence) if len(seq_dummy) < args.gblocks: do_not_continue = 1 exit_file_name = args.output_dir_var + f_contig + '_exit_after_Gblocks.txt' try: output = open(exit_file_name, 'w') except IOError: sys.exit('ERROR: Can\'t open ' + exit_file_name + '!\n') output.write('final alignment after gblocks is too short (<' + str(args.gblocks) + 'AAs) ' +\ '- insufficient number of marker gene residues in query sequence.\n') output.close() continue sub_sequences = re.findall(r'.{1,50}', sequence) for sub_sequence in sub_sequences: sub_sequence = re.sub('U', 'T', sub_sequence) # TK: This for debug; got error from RAxML when encountering Uracil sequences_for_phy[f_contig][count][int(seq_name)] = sub_sequence count += 1 if do_not_continue == 1: continue # Write the sequences to the phy file phy_file_name = args.output_dir_var + f_contig + '.phy' phy_files[f_contig] = phy_file_name try: output = open(phy_file_name, 'w') except IOError: sys.exit('ERROR: Can\'t open ' + phy_file_name + '!\n') nr_of_sequences = nrs_of_sequences[f_contig] output.write(' ' + str(nr_of_sequences) + ' ' + str(sequence_lengths[f_contig]) + '\n') for count in sorted(sequences_for_phy[f_contig].keys()): for seq_name in sorted(sequences_for_phy[f_contig][count].keys()): sequence_part = sequences_for_phy[f_contig][count][seq_name] if count== 0: print_seqname = seq_name if seq_name == -666: print_seqname = 'query' output.write(str(print_seqname)) length = len(str(print_seqname)) c = length while c < 10: output.write(' ') c += 1 output.write(sequence_part + '\n') output.write('\n') output.close() return phy_files #TK undef_hashes on genewise_summary_files, hmmalign_singlehit_files, concatenated_mfa_files, nrs_of_sequences, gblocks_files def start_RAxML(args, phy_files, cog_list, models_to_be_used): """ Run RAxML using the provided Autovivifications of phy files and COGs, as well as the list of models used for each COG. Returns an Autovivification listing the output files of RAxML. Returns an Autovivification containing the reference tree file associated with each functional or rRNA COG. 
""" expected_raxml_outfiles = Autovivify() raxml_outfiles = Autovivify() print 'Run RAxML' # Notify the user, if they've indicated otherwise, that bootstraps cannot be used with the Maximum Parsimony settings of RAxML. # TK: Should this be moved to the beginning of the program when the user first specifies their options? if args.bootstraps > 1 and args.phylogeny == 'p': print 'ATTENTION: You intended to do ' + str(args.bootstraps) + ' bootstrap replications. Unfortunately, bootstrapping is ' +\ 'disabled in the parsimony mode of MLTreeMap. The pipeline will continue without bootstrapping.\n' args.bootstraps = 1 bootstrap_replicates = args.bootstraps args2 = Autovivify() mltree_resources = args.mltreemap_data + PATHDELIM + 'data' + PATHDELIM for f_contig in sorted(phy_files.keys()): # Establish the reference tree file to be used for this contig reference_tree_file = mltree_resources + 'tree_data' + PATHDELIM + args.reference_tree phy_file = phy_files[f_contig] if re.search(r'\A(.)', f_contig): denominator = re.search(r'\A(.)', f_contig).group(0) if not denominator == 'p' and not denominator == 'g' and not denominator == 'i': for cog in sorted(cog_list['all_cogs'].keys()): if not cog_list['all_cogs'][cog] == denominator: continue reference_tree_file = mltree_resources + 'tree_data' + PATHDELIM + cog + '_tree.txt' break # Determine the output file names, and remove any pre-existing output files args2['reference_tree_file_of_denominator'][denominator] = reference_tree_file raxml_files = [args.output_dir_var + 'RAxML_info.' + f_contig,\ args.output_dir_var + 'RAxML_labelledTree.' + f_contig,\ args.output_dir_var + 'RAxML_classification.' + f_contig] for raxml_file in raxml_files: try: shutil.rmtree(raxml_file) except OSError: pass raxml_option = args.phylogeny model_to_be_used = models_to_be_used[f_contig] if model_to_be_used is None: sys.exit('ERROR: No best AA model could be detected for the ML step!\n') # Set up the command to run RAxML raxml_command = [ args.executables + PATHDELIM + 'raxmlHPC', '-m', model_to_be_used] if bootstrap_replicates > 1: raxml_command += [ '-x', '12345', '-#', bootstrap_replicates] # Run RAxML using multiple threads, if CPUs available if args.num_threads: if ( int(args.num_threads) >= 1 ) and ( int(args.num_threads) <= available_cpu_count() ): raxml_command += ['-T', str(int(args.num_threads))] raxml_command += [ '-s', phy_file, '-t', reference_tree_file, '-f', str(raxml_option), '-n', f_contig,\ '-w', str(args.output_dir_var), '>',\ str(args.output_dir_var) + str(f_contig) + '_RAxML.txt'] os.system(' '.join(raxml_command)) # Rename the RAxML output files for f_contig in sorted(phy_files.keys()): denominator = '' if re.match(r'\A(.)', f_contig): denominator = re.match(r'\A(.)', f_contig).group(1) move_command = [ 'mv', str(args.output_dir_var) + 'RAxML_info.' + str(f_contig), \ str(args.output_dir_var) + str(f_contig) + '.RAxML_info.txt'] if os.path.exists(str(args.output_dir_var) + 'RAxML_info.' + str(f_contig)): os.system(' '.join(move_command)) if raxml_option == 'v': raxml_outfiles[denominator][f_contig]['classification'] = str(args.output_dir_var) + str(f_contig) + '.RAxML_classification.txt' raxml_outfiles[denominator][f_contig]['labelled_tree'] = str(args.output_dir_var) + str(f_contig) + '.originalRAxML_labelledTree.txt' move_command1 = [ 'mv', str(args.output_dir_var) + 'RAxML_classification.' + str(f_contig),\ str(raxml_outfiles[denominator][f_contig]['classification'])] move_command2 = [ 'mv', str(args.output_dir_var) + 'RAxML_originalLabelledTree.' 
+ str(f_contig),\ str(raxml_outfiles[denominator][f_contig]['labelled_tree'])] remove_command = [ 'rm', str(args.output_dir_var) + 'RAxML_labelledTree.' + str(f_contig)] if os.path.exists(str(args.output_dir_var) + 'RAxML_classification.' + str(f_contig)): os.system(' '.join(move_command1)) if os.path.exists(str(args.output_dir_var) + 'RAxML_originalLabelledTree.' + str(f_contig)): os.system(' '.join(move_command2)) if os.path.exists(str(args.output_dir_var) + 'RAxML_labelledTree.' + str(f_contig)): os.system(' '.join(remove_command)) else: print "Some files were not successfully created for", str(f_contig) #CML elif raxml_option == 'p': raxml_outfiles[denominator][f_contig] = str(args.output_dir_var) + str(f_contig) + '.RAxML_parsimonyTree.txt' move_command1 = [ 'mv', str(args.output_dir_var) + 'RAxML_parsimonyTree.' + str(f_contig),\ str(raxml_outfiles[denominator][f_contig])] os.system(' '.join(move_command1)) else: sys.exit('ERROR: The chosen RAxML mode is invalid. This should have been noticed earlier by MLTreeMap.' +\ 'Please notify the authors\n') return raxml_outfiles, args2 def parse_RAxML_output(args, args2, tree_numbers_translation, raxml_outfiles, text_of_analysis_type): """ Parse the RAxML output files. Returns an Autovivification of the final RAxML output files. """ raxml_option = args.phylogeny print 'Finishing' output_directory_final_RAxML = args.output_dir_raxml final_RAxML_output_files = Autovivify() for denominator in sorted(raxml_outfiles.keys()): description_text = '# ' + str(text_of_analysis_type[denominator]) + '\n' reference_tree_file = args2['reference_tree_file_of_denominator'][denominator] terminal_children_strings_of_reference = read_and_understand_the_reference_tree(reference_tree_file) content_of_previous_labelled_tree_file = '' rooted_labelled_trees = '' insertion_point_node_hash = '' final_assignment_target_strings = Autovivify() for f_contig in sorted(raxml_outfiles[denominator].keys()): denominator = '' if re.search(r'\A(.)', f_contig): denominator = re.search(r'\A(.)', f_contig).group(1) content_of_labelled_tree_file = '' assignments = Autovivify() nr_of_assignments = 0 if raxml_option == 'v': classification_file = raxml_outfiles[denominator][f_contig]['classification'] labelled_tree_file = raxml_outfiles[denominator][f_contig]['labelled_tree'] try: input = open(labelled_tree_file, 'r') except IOError: sys.exit('ERROR: Can\'t open ' + str(labelled_tree_file) + '!\n') for line in input: line = line.strip() content_of_labelled_tree_file += str(line) input.close() if not content_of_labelled_tree_file == content_of_previous_labelled_tree_file: rooted_labelled_trees, insertion_point_node_hash = read_understand_and_reroot_the_labelled_tree(labelled_tree_file) final_assingment_target_strings = Autovivify() new_assignments = Autovivify() at_least_one_new_assignment = 0 try: input = open(classification_file, 'r') except IOError: sys.exit('ERROR: Can\'t open ' + str(classification_file) + '!\n') for line in input: line = line.strip() query, insertion_point_l, weight = line.split(' ') assignment = '' if re.search(r'I(\d+)', insertion_point_l): assignment = re.search(r'I(\d+)', insertion_point_l).group(1) nr_of_assignments += 1 assignments[assignment] = weight if not assignment in final_assignment_target_strings.keys(): new_assignments[assignment] = 1 at_least_one_new_assignment = 1 input.close() if at_least_one_new_assignment > 0: prae_assignment_target_strings = identify_the_correct_terminal_children_of_each_assignment(\ terminal_children_strings_of_reference, 
rooted_labelled_trees,\ insertion_point_node_hash, new_assignments) for assignment in sorted(prae_assignment_target_strings.keys()): assignment_target_string = prae_assignment_target_strings[assignment] final_assignment_target_strings[assignment] = assignment_target_string elif raxml_option == 'p': mp_tree_file = raxml_outfiles[denominator][f_contig] assignment = 'mp_root' assignments[assignment] = 1 nr_of_assignments = 1 prae_assignment_target_strings = get_correct_mp_assignment(terminal_children_strings_of_reference,\ mp_tree_file, assignments) assignment_target_string = prae_assignment_target_strings[assignment] final_assignment_target_strings[assignment] = assignment_target_string final_RAxML_filename = str(args.output_dir_raxml) + str(f_contig) + '_RAxML_parsed.txt' final_RAxML_output_files[denominator][final_RAxML_filename] = 1 try: output = open(final_RAxML_filename, 'w') except IOError: sys.exit('ERROR: Can\'t create ' + str(final_RAxML_filename) + '!\n') output.write(str(description_text) + '\n') for assignment in sorted(assignments.keys()): assignment_target_string = final_assignment_target_strings[assignment] weight = assignments[assignment] relative_weight = int(((int(weight) / int(nr_of_assignments)) * 100) + 0.5) assignment_terminal_targets = assignment_target_string.split(' ') nr_of_terminal_targets = len(assignment_terminal_targets) - 1 output.write('Placement weight ' + str(relative_weight) + '%: Assignment of query to ') if not nr_of_terminal_targets == 1: output.write('the lowest common ancestor of ') count = 1 while count <= nr_of_terminal_targets: assignment_terminal_target = assignment_terminal_targets[count - 1] is_last_element = 0 if count == nr_of_terminal_targets - 1: is_last_element = 1 name_of_terminal_target = '' name_of_terminal_target = tree_numbers_translation[denominator][assignment_terminal_target] try: name_of_terminal_target except NameError: sys.exit('ERROR: ' + str(assignment_terminal_target) + ' could not be located in the tree with the denominator ' +\ str(denominator) + '!\n') output.write(str(name_of_terminal_target) + ' (' + str(assignment_terminal_target) + ')') if count < nr_of_terminal_targets - 1: output.write(', ') if count == nr_of_terminal_targets - 1: output.write(' and ') if count == nr_of_terminal_targets: output.write('.') count += 1 output.close() content_of_previous_labelled_tree_file = content_of_labelled_tree_file return final_RAxML_output_files def read_and_understand_the_reference_tree(reference_tree_file): reference_tree_elements = read_the_reference_tree(reference_tree_file) reference_tree_info = create_tree_info_hash() reference_tree_info = get_node_subtrees(reference_tree_elements, reference_tree_info) reference_tree_info = assign_parents_and_children(reference_tree_info) terminal_children_strings_of_reference = build_terminal_children_strings_of_reference_nodes(reference_tree_info) return terminal_children_strings_of_reference def read_understand_and_reroot_the_labelled_tree(labelled_tree_file): labelled_tree_elements, insertion_point_node_hash = read_the_raxml_out_tree(labelled_tree_file) labelled_tree_info = create_tree_info_hash() labelled_tree_info = get_node_subtrees(labelled_tree_elements, labelled_tree_info) labelled_tree_info = assign_parents_and_children(labelled_tree_info) labelled_tree_info = build_tree_info_quartets(labelled_tree_info) rooted_labelled_trees = build_newly_rooted_trees(labelled_tree_info) return rooted_labelled_trees, insertion_point_node_hash def 
identify_the_correct_terminal_children_of_each_assignment(terminal_children_strings_of_reference, rooted_labelled_trees, insertion_point_node_hash, assignments): terminal_children_strings_of_assignments = build_terminal_children_strings_of_assignments(rooted_labelled_trees, insertion_point_node_hash, assignments) real_terminal_children_strings_of_assignments = compare_terminal_children_strings(terminal_children_strings_of_assignments, terminal_children_strings_of_reference) return real_terminal_children_strings_of_assignments def get_correct_mp_assignment(terminal_children_strings_of_reference, mp_tree_file, assignments): potential_terminal_children_strings = read_the_raxml_mp_out_tree(mp_tree_file, assignments) real_terminal_children_strings_of_assignments = compare_terminal_children_strings(potential_terminal_children_strings, terminal_children_strings_of_reference) return real_terminal_children_strings_of_assignments def read_the_reference_tree(reference_tree_file): try: input = open(reference_tree_file, 'r') except IOError: sys.exit('ERROR: Could not open ' + reference_tree_file + '!\n') tree_string = '' for line in input: line = line.strip() tree_string += line input.close() tree_string = re.sub('\(', 'L', tree_string) tree_string = re.sub('\)', 'R', tree_string) tree_string = re.sub(r':\d+\.\d+', '', tree_string) count = -2 while re.search('R', tree_string): tree_string = re.sub('R', 'Q' + str(count), tree_string, 1) count += -1 tree_string = re.sub(r'Q-\d+;', 'Q;', tree_string) tree_string = re.sub('L', '(', tree_string) tree_string = re.sub('Q', ')', tree_string) reference_tree_elements = split_tree_string(tree_string) return reference_tree_elements def read_the_raxml_out_tree(labelled_tree_file): insertion_point_node_hash = Autovivify() try: input = open(labelled_tree_file, 'r') except IOError: sys.exit('ERROR: Could not open ' + labelled_tree_file + '!\n') tree_string = '' for line in input: line = line.strip() tree_string += line input.close() tree_symbols_raw_1 = list(tree_string) bracket_diff = 0 tree_string_neu = '(' comma_count = 0 for tree_symbol_raw_1 in tree_symbols_raw_1: if comma_count < 2: if tree_symbol_raw_1 == '(': bracket_diff += 1 if tree_symbol_raw_1 == ')': bracket_diff += -1 if tree_symbol_raw_1 == ',' and bracket_diff == 1: comma_count += 1 if comma_count == 2: tree_string_neu += '):1.0[I666999666]' tree_string_neu += tree_symbol_raw_1 tree_string = tree_string_neu tree_string = re.sub('\(', 'L', tree_string) tree_string = re.sub('\)', 'R', tree_string) tree_string = re.sub('\[', 'Q', tree_string) tree_string = re.sub(':1\.0', '', tree_string) while re.search(r'((\D(\d+))QI(\d+)])', tree_string): to_be_replaced = re.search(r'((\D(\d+))QI(\d+)])', tree_string).group(1) replacement = re.search(r'((\D(\d+))QI(\d+)])', tree_string).group(2) terminal_leaf = re.search(r'((\D(\d+))QI(\d+)])', tree_string).group(3) insertion_point = re.search(r'((\D(\d+))QI(\d+)])', tree_string).group(4) if terminal_leaf <= 0: sys.exit('ERROR: Your tree has terminal leaves with numbers <= 0. 
Please change them to positive values!\n') insertion_point_node_hash[insertion_point] = terminal_leaf tree_string = re.sub(to_be_replaced, replacement, tree_string) count = -2 while re.search(r'QI(\d+)]', tree_string): insertion_point_node_hash[re.search(r'QI(\d+)]', tree_string).group(1)] = count tree_string = re.sub(r'QI(\d+)]', str(count), tree_string, 1) count += -1 tree_string = re.sub('L', '(', tree_string) tree_string = re.sub('R', ')', tree_string) tree_string = re.sub('Q', '[', tree_string) tree_elements = split_tree_string(tree_string) return tree_elements, insertion_point_node_hash def read_the_raxml_mp_out_tree(mp_tree_file, assignments): potential_terminal_children_strings = Autovivify() assignment = '' for assig in sorted(assignments.keys()): assignment = assig break try: input = open(mp_tree_file, 'r') except IOError: sys.exit('ERROR: Can\'t open ' + str(mp_tree_file) + '\n') tree_string = '' for line in input: line = line.strip() tree_string += line input.close() tree_string = re.sub('\(', 'L', tree_string) tree_string = re.sub('\)', 'R', tree_string) if not re.search(r',queryR;\Z', tree_string): sys.exit('ERROR: The query is not at the root of ' + str(mp_tree_file) + '!\n') else: tree_string = re.sub(r',queryR;\Z', 'R;', tree_string) tree_string = re.sub(r':\d+\.\d+', '', tree_string) count = -2 while re.search('R', tree_string): tree_string = re.sub('R', 'Q' + str(count), tree_string, 1) count += -1 tree_string = re.sub(r'Q-\d+;', 'Q;', tree_string) tree_string = re.sub('L', '(', tree_string) tree_string = re.sub('Q', ')', tree_string) tree_symbols = list(tree_string) bracket_diff = 0 comma_count = 0 substrings = ['', ','] for tree_symbol in tree_symbols: if comma_count < 1: if tree_symbol == '(': bracket_diff += 1 if tree_symbol == ')': bracket_diff += -1 if tree_symbol == ',' and bracket_diff == 1: comma_count += 1 substrings[0] += tree_symbol else: substrings[1] += tree_symbol for substring in substrings: terminal_children = Autovivify() for eachGroup in re.findall(r'(\D)(\d+)', str(substring)): if eachGroup[0] == '-': continue terminal_children[eachGroup[1]] = 1 potential_terminal_children_string = '' for potential_terminal_child in sorted(terminal_children.keys(), key=int): potential_terminal_children_string += str(potential_terminal_child) + ' ' potential_terminal_children_strings[assignment][potential_terminal_children_string] = 1 return potential_terminal_children_strings def split_tree_string(tree_string): tree_symbols_raw = list(str(tree_string)) count = -1 previous_symbol = '' tree_elements = Autovivify() for tree_symbol_raw in tree_symbols_raw: if re.search(r'\d', tree_symbol_raw) and (re.search(r'\d', previous_symbol) or previous_symbol == '-'): tree_elements[count] += tree_symbol_raw else: count += 1 tree_elements[count] = tree_symbol_raw previous_symbol = tree_symbol_raw return tree_elements def create_tree_info_hash(): tree_info = Autovivify() return tree_info def get_node_subtrees(tree_elements, tree_info): bracket_l_count = 0 bracket_r_count = 0 parents_of_node = Autovivify() tree_element_nr = -1 for tree_element in tree_elements.values(): tree_element_nr += 1 if str(tree_element) == '(': bracket_l_count = 1 bracket_r_count = 0 tree_sub_element_nr = tree_element_nr subtree_string = '(' while True: tree_sub_element_nr += 1 tree_sub_element = tree_elements[tree_sub_element_nr] if str(tree_sub_element) == '(': bracket_l_count += 1 if str(tree_sub_element) == ')': bracket_r_count += 1 if bracket_l_count == bracket_r_count: nodename = 
tree_elements[tree_sub_element_nr + 1] if str(nodename) == ';': nodename = -1 subtree_string += ')' + str(nodename) tree_info['subtree_of_node'][nodename] = subtree_string break else: subtree_string += str(tree_sub_element) for tree_element in tree_elements.values(): if not re.search(r'\d+', str(tree_element)): continue if tree_element in tree_info['subtree_of_node'].keys(): continue tree_info['subtree_of_node'][tree_element] = tree_element return tree_info def assign_parents_and_children(tree_info): for node in sorted(tree_info['subtree_of_node'].keys()): if node == -1: continue subtree = str(tree_info['subtree_of_node'][node]) parent = None for potential_parent in sorted(tree_info['subtree_of_node'].keys()): if node == potential_parent: continue potential_parent_subtree = str(tree_info['subtree_of_node'][potential_parent]) subtree = re.sub('\(', 'L', subtree) subtree = re.sub('\)', '#', subtree) potential_parent_subtree = re.sub('\(', 'L', potential_parent_subtree) potential_parent_subtree = re.sub('\)', '#', potential_parent_subtree) potential_parent = str(potential_parent) if re.search(r'\AL'+re.escape(subtree)+r',.+#'+re.escape(potential_parent)+r'\Z', potential_parent_subtree) or \ re.search(r'\AL.+,'+re.escape(subtree)+r'#'+re.escape(potential_parent)+r'\Z', potential_parent_subtree): parent = potential_parent break tree_info['parent_of_node'][node] = parent tree_info['children_of_node'][parent][node] = 1 return tree_info def build_tree_info_quartets(tree_info): for node in sorted(tree_info['parent_of_node'].keys(), key=int): parent = tree_info['parent_of_node'][node] if int(parent) == -1: for roots_child in sorted(tree_info['children_of_node']['-1'].keys(), key=int): if roots_child == node: continue parent = roots_child tree_info['quartets'][node][parent] = 1 if node in tree_info['children_of_node']: for child in sorted(tree_info['children_of_node'][node].keys(), key=int): tree_info['quartets'][node][child] = 1 return tree_info def build_newly_rooted_trees(tree_info): tree_number = 0 list_of_already_used_attachments = Autovivify() rooted_trees = Autovivify() for node in sorted(tree_info['quartets'].keys(), key=int): if node in list_of_already_used_attachments: continue for attachment in sorted(tree_info['quartets'][node].keys(), key=int): list_of_already_used_attachments[attachment] = 1 tree_string = '' root = -1 node_infos = Autovivify() node_infos['previous_node'] = '' node_infos['node'] = ';' node_infos['open_attachments'][node] = 1 node_infos['open_attachments'][attachment] = 1 new_tree = recursive_tree_builder(tree_info, node_infos, tree_string) rooted_trees[tree_number] = new_tree tree_number += 1 return rooted_trees def recursive_tree_builder(tree_info, node_infos, tree_string): node = node_infos['node'] count = 0 for attachment in sorted(node_infos['open_attachments'].keys(), key=int): count += 1 if count == 1: tree_string += '(' node_infos2 = Autovivify() node_infos2['previous_node'] = node node_infos2['node'] = attachment count2 = 0 for attachment_of_used_attachment in sorted(tree_info['quartets'][attachment].keys()): if attachment_of_used_attachment in node_infos['open_attachments']: continue if attachment_of_used_attachment == node: continue count2 += 1 node_infos2['open_attachments'][attachment_of_used_attachment] = 1 if count2 > 0: tree_string = recursive_tree_builder(tree_info, node_infos2, tree_string) else: tree_string += str(attachment) if count == 1: tree_string += ',' if count == 2: tree_string += ')' + str(node) return tree_string def 
build_terminal_children_strings_of_assignments(rooted_trees, insertion_point_node_hash, assignments): terminal_children_strings_of_assignments = Autovivify() for assignment in sorted(assignments.keys()): internal_node_of_assignment = insertion_point_node_hash[assignment] for rooted_tree in rooted_trees.keys(): rooted_tree_elements = split_tree_string(rooted_trees[rooted_tree]) rooted_tree_info = create_tree_info_hash() rooted_tree_info = get_node_subtrees(rooted_tree_elements, rooted_tree_info) assignment_subtree = str(rooted_tree_info['subtree_of_node'][str(internal_node_of_assignment)]) terminal_children = Autovivify() if re.search(r'\A(\d+)\Z', assignment_subtree): terminal_children[re.search(r'\A(\d+)\Z', assignment_subtree).group(1)] = 1 else: for each_hit in re.findall(r'(\D)(\d+)', assignment_subtree): if each_hit[0] == '-': continue terminal_children[each_hit[1]] = 1 terminal_children_string_of_assignment = '' for terminal_child_of_assignment in sorted(terminal_children.keys(), key=int): terminal_children_string_of_assignment += str(terminal_child_of_assignment) + ' ' terminal_children_strings_of_assignments[assignment][terminal_children_string_of_assignment] = 1 return terminal_children_strings_of_assignments def build_terminal_children_strings_of_reference_nodes(reference_tree_info): terminal_children_strings_of_reference = Autovivify() for node in sorted(reference_tree_info['subtree_of_node'].keys()): reference_subtree = reference_tree_info['subtree_of_node'][node] terminal_children = Autovivify() if re.search(r'\A(\d+)\Z', str(reference_subtree)): terminal_children[re.search(r'\A(\d+)\Z', str(reference_subtree)).group(1)] = 1 else: for each_hit in re.findall(r'(.)(\d+)', str(reference_subtree)): if each_hit[0] == '-': continue terminal_children[each_hit[1]] = 1 terminal_children_string_of_reference = '' for terminal_child_of_reference in sorted(terminal_children.keys(), key=int): terminal_children_string_of_reference += str(terminal_child_of_reference) + ' ' terminal_children_strings_of_reference[terminal_children_string_of_reference] = 1 return terminal_children_strings_of_reference def compare_terminal_children_strings(terminal_children_strings_of_assignments, terminal_children_strings_of_reference): real_terminal_children_strings_of_assignments = Autovivify() there_was_a_hit = 0 for assignment in sorted(terminal_children_strings_of_assignments.keys()): real_terminal_children_string = '' for terminal_children_string_of_assignment in sorted(terminal_children_strings_of_assignments[assignment].keys()): if terminal_children_string_of_assignment in terminal_children_strings_of_reference: real_terminal_children_string = terminal_children_string_of_assignment real_terminal_children_strings_of_assignments[assignment] = real_terminal_children_string there_was_a_hit = 1 break if str(real_terminal_children_string) == '' and not str(assignment) == 'mp_root': sys.exit('ERROR: The RAxML output tree could not be rooted correctly!!!\n') if there_was_a_hit <= 0: sys.exit('ERROR: The RAxML output tree could not be rooted correctly!!!\n') return real_terminal_children_strings_of_assignments def concatenate_RAxML_output_files(args, final_RAxML_output_files, text_of_analysis_type): output_directory_final = args.output_dir_final for denominator in sorted(final_RAxML_output_files.keys()): nr_of_files = 0 assignments = Autovivify() description_text = '# ' + str(text_of_analysis_type[denominator]) + '\n' final_output_file_name = str(output_directory_final) + str(denominator) + 
'_concatenated_RAxML_outputs.txt' for final_RAxML_output_file in sorted(final_RAxML_output_files[denominator].keys()): nr_of_files += 1 try: input = open(final_RAxML_output_file, 'r') except IOError: sys.exit('ERROR: Can\'t open ' + str(final_RAxML_output_file) + '!\n') for line in input: line = line.strip() if re.search(r'Placement weight (\d+)%: (.+)\Z', line): weight = re.search(r'Placement weight (\d+)%: (.+)\Z', line).group(1) assignment = re.search(r'Placement weight (\d+)%: (.+)\Z', line).group(2) if assignment in assignments.keys(): assignments[assignment] += int(weight) else: assignments[assignment] = int(weight) else: continue input.close() assignments_with_relative_weights = Autovivify() for assignment in sorted(assignments.keys(), reverse=True): weight = assignments[assignment] relative_weight = (int(((float(weight)/float(nr_of_files))*10000.0)+0.5))/10000.0 assignments_with_relative_weights[relative_weight][assignment] = 1 try: output = open(final_output_file_name, 'w') except IOError: sys.exit('ERROR: Can\'t create ' + str(final_output_file_name) + '!\n') if args.showmessages: print str(denominator) + '_ results concatenated:' output.write(str(description_text) + '\n') sum_of_relative_weights = 0 for relative_weight in sorted (assignments_with_relative_weights.keys(), reverse=True): for assignment in sorted(assignments_with_relative_weights[relative_weight].keys(), reverse=True): sum_of_relative_weights += relative_weight if args.showmessages: print 'Placement weight ' + str(relative_weight) + '%: ' + str(assignment) output.write('Placement weight ' + str(relative_weight) + '%: ' + str(assignment) + '\n') output.close() if args.showmessages: print str(denominator) + '_ sum of placement weights (should be 100): ' + str(sum_of_relative_weights) def read_species_translation_files(args, cog_list): tree_numbers_translation = Autovivify() translation_files = Autovivify() phylogenetic_denominator = args.reftree if phylogenetic_denominator == 'g': translation_files[phylogenetic_denominator] = args.mltreemap_data + PATHDELIM + \ 'data' + PATHDELIM + 'tree_data' + \ PATHDELIM + 'tax_ids_geba_tree.txt' elif phylogenetic_denominator == 'i': translation_files[phylogenetic_denominator] = args.mltreemap_data + PATHDELIM + \ 'data' + PATHDELIM + 'tree_data' + \ PATHDELIM + 'tax_ids_fungitr.txt' else: translation_files[phylogenetic_denominator] = args.mltreemap_data + PATHDELIM + \ 'data' + PATHDELIM + 'tree_data' + \ PATHDELIM + 'tax_ids_nr.txt' for functional_cog in sorted(cog_list['functional_cogs'].keys()): denominator = cog_list['functional_cogs'][functional_cog] filename = 'tax_ids_' + str(functional_cog) + '.txt' translation_files[denominator] = args.mltreemap_data + PATHDELIM + \ 'data' + PATHDELIM + 'tree_data' + \ PATHDELIM + filename for phylogenetic_rRNA_cog in sorted(cog_list['phylogenetic_rRNA_cogs'].keys()): denominator = cog_list['phylogenetic_rRNA_cogs'][phylogenetic_rRNA_cog] filename = 'tax_ids_' + str(phylogenetic_rRNA_cog) + '.txt' translation_files[denominator] = args.mltreemap_data + PATHDELIM + \ 'data' + PATHDELIM + 'tree_data' + \ PATHDELIM + filename for denominator in sorted(translation_files.keys()): filename = translation_files[denominator] try: input = open(filename, 'r') except IOError: sys.exit('ERROR: Can\'t open ' + str(filename) + '!\n') if args.showmessages: print 'opened ' + str(filename) # TK for line in input: line = line.strip() try: number, translation = line.split('\t') except ValueError: sys.exit('ValueError: .split(\'\\t\') on ' + str(line)) 
tree_numbers_translation[denominator][number] = translation input.close() return tree_numbers_translation def available_cpu_count(): """ Number of available virtual or physical CPUs on this system, i.e. user/real as output by time(1) when called with an optimally scaling userspace-only program""" # cpuset # cpuset may restrict the number of *available* processors try: m = re.search(r'(?m)^Cpus_allowed:\s*(.*)$', open('/proc/self/status').read()) if m: res = bin(int(m.group(1).replace(',', ''), 16)).count('1') if res > 0: return res except IOError: pass # Python 2.6+ try: import multiprocessing return multiprocessing.cpu_count() except (ImportError, NotImplementedError): pass # http://code.google.com/p/psutil/ try: import psutil return psutil.NUM_CPUS except (ImportError, AttributeError): pass # POSIX try: res = int(os.sysconf('SC_NPROCESSORS_ONLN')) if res > 0: return res except (AttributeError, ValueError): pass # Windows try: res = int(os.environ['NUMBER_OF_PROCESSORS']) if res > 0: return res except (KeyError, ValueError): pass # jython try: from java.lang import Runtime runtime = Runtime.getRuntime() res = runtime.availableProcessors() if res > 0: return res except ImportError: pass # BSD try: sysctl = subprocess.Popen(['sysctl', '-n', 'hw.ncpu'], stdout=subprocess.PIPE) scStdout = sysctl.communicate()[0] res = int(scStdout) if res > 0: return res except (OSError, ValueError): pass # Linux try: res = open('/proc/cpuinfo').read().count('processor\t:') if res > 0: return res except IOError: pass # Solaris try: pseudoDevices = os.listdir('/devices/pseudo/') res = 0 for pd in pseudoDevices: if re.match(r'^cpuid@[0-9]+$', pd): res += 1 if res > 0: return res except OSError: pass # Other UNIXes (heuristic) try: try: dmesg = open('/var/run/dmesg.boot').read() except IOError: dmesgProcess = subprocess.Popen(['dmesg'], stdout=subprocess.PIPE) dmesg = dmesgProcess.communicate()[0] res = 0 while '\ncpu' + str(res) + ':' in dmesg: res += 1 if res > 0: return res except OSError: pass raise Exception('Can not determine number of CPUs on this system') def deleteFiles(args): print 'Deleting the files other than \'final_outputs\' folder' sectionsToBeDeleted = [] if args.delete: sectionsToBeDeleted = args.delete.split(':') filesToBeDeleted = [] for section in sectionsToBeDeleted: if section == '1': filesToBeDeleted += glob.glob(args.output_dir_var + '*.fa') filesToBeDeleted += glob.glob(args.output_dir_var + '*sequence.txt') filesToBeDeleted += glob.glob(args.output_dir_var + '*sequence_shortened.txt') filesToBeDeleted += glob.glob(args.output_dir_var + '*.fasta_formatted.txt') if section == '2': filesToBeDeleted += glob.glob(args.output_dir_var + '*BLAST_results*') filesToBeDeleted += glob.glob(args.output_dir_var + '*blast_result_purified.txt') filesToBeDeleted += glob.glob(args.output_dir_var + '*rRNA_result_summary.txt') if section == '3': filesToBeDeleted += glob.glob(args.output_dir_var + '*genewise.txt') filesToBeDeleted += glob.glob(args.output_dir_var + '*genewise_result_summary.txt') if section == '4': filesToBeDeleted += glob.glob(args.output_dir_var + '*.mfa') filesToBeDeleted += glob.glob(args.output_dir_var + '*.mfa-gb') filesToBeDeleted += glob.glob(args.output_dir_var + '*.mfa-gb.txt') if section == '5': filesToBeDeleted += glob.glob(args.output_dir_var + '*_exit_after_Gblocks.txt') filesToBeDeleted += glob.glob(args.output_dir_var + '*_RAxML.txt') filesToBeDeleted += glob.glob(args.output_dir_var + '*RAxML_classification.txt') filesToBeDeleted += glob.glob(args.output_dir_var + 
'*RAxML_info.txt') filesToBeDeleted += glob.glob(args.output_dir_var + '*RAxML_labelledTree.txt') filesToBeDeleted += glob.glob(args.output_dir_var + '*.phy') filesToBeDeleted += glob.glob(args.output_dir_var + '*.phy.reduced') for file in filesToBeDeleted: if path.exists(file): os.remove(file) if not args.verbose: dirsToBeDeleted = [args.output_dir_var, args.output_dir_raxml] for dir in dirsToBeDeleted: if path.exists(dir): shutil.rmtree(dir) else: pass def main(argv, errorlogger = None, runstatslogger = None): global parser # STAGE 1: Prompt the user and prepare files and lists for the pipeline parser = createParser() options, args = parser.parse_args(argv) options = checkParserArguments(options) removePreviousOutput(options) cog_list, text_of_analysis_type = createCogList(options) non_wag_cog_list = get_non_wag_cogs(options) splitFiles = splitFastaInput(options) # STAGE 2: Run BLAST to determine which COGs are present in the input sequence(s) runBlast(options, splitFiles) blastResults = readBlastResults(options) blast_hits_purified = parseBlastResults(options, blastResults, cog_list) # STAGE 3: Run Genewise (or not) to produce amino acid sequences based on the COGs found in the input sequence(s) contig_coordinates, shortened_sequence_files = produceGenewiseFiles(options, blast_hits_purified) if options.reftype == 'n': genewise_outputfiles = startGenewise(options, shortened_sequence_files, blast_hits_purified) genewise_summary_files = parse_genewise_results(options, genewise_outputfiles, contig_coordinates) contig_rRNA_coordinates, rRNA_hit_files = get_rRNA_hit_sequences(options, blast_hits_purified, cog_list, genewise_summary_files) elif options.reftype == 'a': genewise_summary_files = blastpParser(options, shortened_sequence_files, blast_hits_purified) # STAGE 4: Run hmmalign and Gblocks to produce the MSAs required to perform the subsequent ML/MP estimations hmmalign_singlehit_files = prepare_and_run_hmmalign(options, genewise_summary_files, cog_list); concatenated_mfa_files, nrs_of_sequences, models_to_be_used = concatenate_hmmalign_singlehits_files(options, hmmalign_singlehit_files, non_wag_cog_list) gblocks_files = start_gblocks(options, concatenated_mfa_files, nrs_of_sequences) # STAGE 5: Run RAxML to compute the ML/MP estimations phy_files = produce_phy_file(options, gblocks_files, nrs_of_sequences) raxml_outfiles, options2 = start_RAxML(options, phy_files, cog_list, models_to_be_used) tree_numbers_translation = read_species_translation_files(options, cog_list) final_RAxML_output_files = parse_RAxML_output(options, options2, tree_numbers_translation, raxml_outfiles, text_of_analysis_type) concatenate_RAxML_output_files(options, final_RAxML_output_files, text_of_analysis_type) # STAGE 6: Delete files as determined by the user deleteFiles(options) def MetaPathways_mltreemap(argv, errorlogger = None, runstatslogger =None): if errorlogger != None: errorlogger.write("#STEP\tMLTREEMAP_CALCULATION\n") createParser() main(argv, errorlogger = errorlogger, runstatslogger = runstatslogger) return (0,'') if __name__ == '__main__': createParser() main(sys.argv[1:])
wholebiome/MetaPathways_Python_Koonkie.3.0
libs/python_scripts/MetaPathways_mltreemap.py
Python
mit
111054
[ "BLAST" ]
7555780831fa052b8b9f0012298a28d6d14be38bcecf9a40ecbcd7a035de39c0
#!/usr/bin/python
# -*- coding: utf-8 -*-

# ---------------------------------------------------------------------
# Copyright (c) 2012 Michael Hull.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
#  - Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
#  - Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in
#    the documentation and/or other materials provided with the
#    distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------

"""Applying different channel densities over a cell.

We start with a cell with a long axon, and then apply Hodgkin-Huxley channels
over the surface. We look at the effect of changing the density of leak and
sodium channels in just the axon of the neuron (not the soma)

This example also shows the use of tags; 300 traces are recorded in this
experiment; but we don't ever need to get involved in managing them directly.
We can just specify that all traces recorded on simulation X should be tagged
with "SIMY", and then tell the TagViewer to plot everything with a tag 'SIMY'
"""

from morphforge.stdimports import *
from morphforgecontrib.stdimports import StandardModels


def sim(glk_multiplier, gna_multiplier, tag):
    env = NEURONEnvironment()
    sim = env.Simulation()

    # Create a cell:
    morph = MorphologyBuilder.get_soma_axon_morph(axon_length=3000.0, axon_radius=0.3, soma_radius=9.0, axon_sections=20)
    cell = sim.create_cell(name="Cell1", morphology=morph)

    lk_chl = ChannelLibrary.get_channel(modelsrc=StandardModels.HH52, channeltype="Lk", env=env)
    na_chl = ChannelLibrary.get_channel(modelsrc=StandardModels.HH52, channeltype="Na", env=env)
    k_chl = ChannelLibrary.get_channel(modelsrc=StandardModels.HH52, channeltype="K", env=env)

    # Apply the channels uniformly over the cell
    cell.apply_channel(lk_chl)
    cell.apply_channel(na_chl)
    cell.apply_channel(k_chl)

    # Over-ride the parameters in the axon:
    cell.apply_channel(channel=lk_chl, where="axon", parameter_multipliers={'gScale':glk_multiplier})
    cell.apply_channel(channel=na_chl, where="axon", parameter_multipliers={'gScale':gna_multiplier})
    cell.set_passive(PassiveProperty.SpecificCapacitance, qty('1.0:uF/cm2'))

    for cell_location in CellLocator.get_locations_at_distances_away_from_dummy(cell=cell, distances=range(9, 3000, 100)):
        sim.record(cell, what=StandardTags.Voltage, cell_location=cell_location, user_tags=[tag])

    # Create the stimulus and record the injected current:
    cc = sim.create_currentclamp(name="Stim1", amp=qty("250:pA"), dur=qty("5:ms"), delay=qty("100:ms"), cell_location=cell.soma)
    sim.record(cc, what=StandardTags.Current)

    # run the simulation
    return sim.run()


# Display the results:
results_a = [
    sim(glk_multiplier=0.1, gna_multiplier=1.0, tag="SIM1"),
    sim(glk_multiplier=0.5, gna_multiplier=1.0, tag="SIM2"),
    sim(glk_multiplier=1.0, gna_multiplier=1.0, tag="SIM3"),
    sim(glk_multiplier=5.0, gna_multiplier=1.0, tag="SIM4"),
    sim(glk_multiplier=10.0, gna_multiplier=1.0, tag="SIM5"),
]

TagViewer(results_a, timerange=(97.5, 140)*units.ms, show=False,
          plots=[
              TagPlot("ALL{Voltage,SIM1}", ylabel='gLeak: 0.1\nVoltage', yrange=(-80, 50)*units.mV, legend_labeller=None),
              TagPlot("ALL{Voltage,SIM2}", ylabel='gLeak: 0.5\nVoltage', yrange=(-80, 50)*units.mV, legend_labeller=None),
              TagPlot("ALL{Voltage,SIM3}", ylabel='gLeak: 1.0\nVoltage', yrange=(-80, 50)*units.mV, legend_labeller=None),
              TagPlot("ALL{Voltage,SIM4}", ylabel='gLeak: 5.0\nVoltage', yrange=(-80, 50)*units.mV, legend_labeller=None),
              TagPlot("ALL{Voltage,SIM5}", ylabel='gLeak: 10.0\nVoltage', yrange=(-80, 50)*units.mV, legend_labeller=None),
          ])

results_b = [
    sim(gna_multiplier=0.1, glk_multiplier=1.0, tag="SIM6"),
    sim(gna_multiplier=0.5, glk_multiplier=1.0, tag="SIM7"),
    sim(gna_multiplier=0.75, glk_multiplier=1.0, tag="SIM8"),
    sim(gna_multiplier=1.0, glk_multiplier=1.0, tag="SIM9"),
]

TagViewer(results_b, timerange=(97.5, 140)*units.ms, show=True,
          plots=[
              TagPlot("ALL{Voltage,SIM6}", ylabel='gNa: 0.10\nVoltage', yrange=(-80, 50)*units.mV, legend_labeller=None),
              TagPlot("ALL{Voltage,SIM7}", ylabel='gNa: 0.50\nVoltage', yrange=(-80, 50)*units.mV, legend_labeller=None),
              TagPlot("ALL{Voltage,SIM8}", ylabel='gNa: 0.75\nVoltage', yrange=(-80, 50)*units.mV, legend_labeller=None),
              TagPlot("ALL{Voltage,SIM9}", ylabel='gNa: 1.00\nVoltage', yrange=(-80, 50)*units.mV, legend_labeller=None),
          ])
mikehulluk/morphforge
src/morphforgeexamples/exset2_singlecell_simulations/singlecell_simulation080.py
Python
bsd-2-clause
5791
[ "NEURON" ]
312727633ae59d6c28abb51e0c1a9621e5e21b64d123b7dbd58055fea5ec45d8
# # gPrime - A web-based genealogy program # # Copyright (C) 2000-2005 Donald N. Allingham # Copyright (C) 2008 Brian G. Matherly # Copyright (C) 2009 Benny Malengier # Copyright (C) 2012 Paul Franklin # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # """ The core of the Gramps plugin system. This module provides capability to load plugins from specified directories and provide information about the loaded plugins. Plugins are divided into several categories. These are: reports, tools, importers, exporters, quick reports, and document generators. """ #------------------------------------------------------------------------- # # Standard Python modules # #------------------------------------------------------------------------- import os import sys import re import logging LOG = logging.getLogger('.' + __name__) LOG.progagate = True from ..const import LOCALE as glocale _ = glocale.translation.gettext #------------------------------------------------------------------------- # # Gprime modules # #------------------------------------------------------------------------- from ..config import config from . import PluginRegister, ImportPlugin, ExportPlugin, DocGenPlugin from ..constfunc import win #------------------------------------------------------------------------- # # Constants # #------------------------------------------------------------------------- _UNAVAILABLE = _("No description was provided") #------------------------------------------------------------------------- # # BasePluginManager # #------------------------------------------------------------------------- class BasePluginManager: """ unique singleton storage class for a :class:`.PluginManager`. """ __instance = None def get_instance(): """ Use this function to get the instance of the :class:`.PluginManager` """ if BasePluginManager.__instance is None: BasePluginManager.__instance = 1 # Set to 1 for __init__() BasePluginManager.__instance = BasePluginManager() return BasePluginManager.__instance get_instance = staticmethod(get_instance) def __init__(self): """ This function should only be run once by get_instance() """ if BasePluginManager.__instance is not 1: raise Exception("This class is a singleton. " "Use the get_instance() method") self.__import_plugins = [] self.__export_plugins = [] self.__docgen_plugins = [] self.__attempt_list = [] self.__failmsg_list = [] self.__external_opt_dict = {} self.__success_list = [] self.__docgen_names = [] self.__mod2text = {} self.__modules = {} self.__pgr = PluginRegister.get_instance() self.__registereddir_set = set() self.__loaded_plugins = {} def reg_plugins(self, direct, dbstate=None, uistate=None, load_on_reg=False): """ Searches the specified directory, and registers python plugin that are being defined in gpr.py files. If a relationship calculator for env var LANG is present, it is immediately loaded so it is available for all. 
""" # if the directory does not exist, do nothing if not os.path.isdir(direct): return False # return value is True for error for (dirpath, dirnames, filenames) in os.walk(direct): root, subdir = os.path.split(dirpath) if subdir.startswith("."): dirnames[:] = [] continue for dirname in dirnames: # Skip hidden and system directories: if dirname.startswith(".") or dirname in ["po", "locale"]: dirnames.remove(dirname) # if the path has not already been loaded, save it in the # registereddir_list list for use on reloading. self.__registereddir_set.add(dirpath) self.__pgr.scan_dir(dirpath, uistate=uistate) if load_on_reg: # Run plugins that request to be loaded on startup and # have a load_on_reg callable. # first, remove hidden plugins_to_load = [] for plugin in self.__pgr.filter_load_on_reg(): if plugin.id in config.get("plugin.hiddenplugins"): continue plugins_to_load.append(plugin) # next, sort on dependencies # Probably a more effecient method to get dependency graph: plugins_sorted = [] count = 0 max_count = len(plugins_to_load) while plugins_to_load: for plugin in plugins_to_load[:]: # copy of list delay = False for depend in plugin.depends_on: if depend not in [p.id for p in plugins_sorted]: delay = True break if delay: pass # wait till next loop else: if plugin not in plugins_sorted: plugins_sorted.append(plugin) if plugin in plugins_to_load: plugins_to_load.remove(plugin) count += 1 if count > max_count: print("Cannot resolve the following plugin dependencies:") for plugin in plugins_to_load: print(" Plugin '%s' requires: %s" % ( plugin.id, plugin.depends_on)) break # now load them: for plugin in plugins_sorted: mod = self.load_plugin(plugin) if hasattr(mod, "load_on_reg"): try: results = mod.load_on_reg(dbstate, uistate, plugin) except: import traceback traceback.print_exc() print("Plugin '%s' did not run; continuing..." % plugin.name) continue try: iter(results) plugin.data += results except: plugin.data = results def is_loaded(self, pdata_id): """ return True if plugin is already loaded """ if pdata_id in self.__loaded_plugins: return True return False def load_plugin(self, pdata): """ Load a :class:`.PluginData` object. This means import of the python module. Plugin directories are added to sys path, so files are found """ if pdata.id in self.__loaded_plugins: return self.__loaded_plugins[pdata.id] need_reload = False filename = pdata.fname if filename in self.__modules: #filename is loaded already, a different plugin in this module _module = self.__modules[filename] self.__success_list.append((filename, _module, pdata)) self.__loaded_plugins[pdata.id] = _module self.__mod2text[_module.__name__] += ' - ' + pdata.description return _module if filename in self.__attempt_list: #new load attempt after a fail, a reload needed need_reload = True #remove previous fail of the plugins in this file dellist = [] for index, data in enumerate(self.__failmsg_list): if data[0] == filename: dellist.append(index) dellist.reverse() for index in dellist: del self.__failmsg_list[index] else: self.__attempt_list.append(filename) try: _module = self.import_plugin(pdata) if need_reload: # For some strange reason second importing of a failed plugin # results in success. Then reload reveals the actual error. # Looks like a bug in Python. 
_module = self.reload(_module, pdata) if _module: self.__success_list.append((filename, _module, pdata)) self.__modules[filename] = _module self.__loaded_plugins[pdata.id] = _module self.__mod2text[_module.__name__] = pdata.description return _module except: import traceback traceback.print_exc() self.__failmsg_list.append((filename, sys.exc_info(), pdata)) return None def import_plugin(self, pdata): """ Rather than just __import__(id), this will add the pdata.fpath to sys.path first (if needed), import, and then reset path. """ module = None if isinstance(pdata, str): pdata = self.get_plugin(pdata) if not pdata: return None if pdata.fpath not in sys.path: if pdata.mod_name: sys.path.insert(0, pdata.fpath) try: module = __import__(pdata.mod_name) except ValueError as err: # Python3 on Windows work with unicode in sys.path # but they are mbcs encode for checking validity if win(): # we don't want to load Gramps core plugin like this # only 3rd party plugins if "gramps" in pdata.fpath: try: sys.path.insert(0, ".") oldwd = os.getcwd() os.chdir(pdata.fpath) module = __import__(pdata.mod_name) os.chdir(oldwd) sys.path.pop(0) except ValueError as err: LOG.warning("Plugin error (from '%s'): %s" % (pdata.mod_name, err)) else: LOG.warning("Plugin error (from '%s'): %s" % (pdata.mod_name, err)) except ImportError as err: LOG.warning("Plugin error (from '%s'): %s" % (pdata.mod_name, err)) sys.path.pop(0) else: print("WARNING: module cannot be loaded") else: module = __import__(pdata.mod_name) return module def empty_managed_plugins(self): """ For some plugins, managed Plugin are used. These are only reobtained from the registry if this method is called """ # TODO: do other lists need to be reset here, too? self.__import_plugins = [] self.__export_plugins = [] self.__docgen_plugins = [] def reload_plugins(self): """ Reload previously loaded plugins """ pymod = re.compile(r"^(.*)\.py$") oldfailmsg = self.__failmsg_list[:] self.__failmsg_list = [] # attempt to reload all plugins that have succeeded in the past self.empty_managed_plugins() self.__loaded_plugins = {} oldmodules = self.__modules self.__modules = {} dellist = [] #reload first modules that loaded successfully previously for (index, plugin) in enumerate(self.__success_list): filename = plugin[0] pdata = plugin[2] filename = filename.replace('pyc','py') filename = filename.replace('pyo','py') if filename in self.__modules: #module already reloaded, a second plugin in same module continue try: self.reload(plugin[1], pdata) self.__modules[filename] = plugin[1] self.__loaded_plugins[pdata.id] = plugin[1] except: dellist.append(index) self.__failmsg_list.append((filename, sys.exc_info(), pdata)) dellist.reverse() for index in dellist: del self.__success_list[index] # Remove previously good plugins that are now bad # from the registered lists self.__purge_failed() # attempt to load the plugins that have failed in the past for (filename, message, pdata) in oldfailmsg: self.load_plugin(pdata) def reload(self, module, pdata): """ Reloads modules that might not be in the path. """ try: import imp fp, pathname, description = imp.find_module(pdata.mod_name, [pdata.fpath]) try: module = imp.load_module(pdata.mod_name, fp, pathname,description) finally: if fp: fp.close() except: if pdata.mod_name in sys.modules: del sys.modules[pdata.mod_name] module = self.import_plugin(pdata) return module def get_fail_list(self): """ Return the list of failed plugins. """ return self.__failmsg_list def get_success_list(self): """ Return the list of succeeded plugins. 
""" return self.__success_list def get_plugin(self, id): """ Returns a plugin object from :class:`.PluginRegister` by id. """ return self.__pgr.get_plugin(id) def get_reg_reports(self, gui=True): """ Return list of registered reports :param gui: bool indicating if GUI reports or CLI reports must be returned """ return self.__pgr.report_plugins(gui) def get_reg_tools(self, gui=True): """ Return list of registered tools :aram gui: bool indicating if GUI reports or CLI reports must be returned """ return self.__pgr.tool_plugins(gui) def get_reg_quick_reports(self): """ Return list of registered quick reports """ return self.__pgr.quickreport_plugins() def get_reg_views(self): """ Return list of registered views """ return self.__pgr.view_plugins() def get_reg_mapservices(self): """ Return list of registered mapservices """ return self.__pgr.mapservice_plugins() def get_reg_bookitems(self): """ Return list of reports registered as bookitem """ return self.__pgr.bookitem_plugins() def get_reg_gramplets(self): """ Return list of non hidden gramplets. """ return self.__pgr.gramplet_plugins() def get_reg_sidebars(self): """ Return list of registered sidebars. """ return self.__pgr.sidebar_plugins() def get_reg_databases(self): """ Return list of registered database backends """ return self.__pgr.database_plugins() def get_external_opt_dict(self): """ Return the dictionary of external options. """ return self.__external_opt_dict def get_module_description(self, module): """ Given a module name, return the module description. """ return self.__mod2text.get(module, '') def get_reg_importers(self): """ Return list of registered importers """ return self.__pgr.import_plugins() def get_reg_exporters(self): """ Return list of registered exporters """ return self.__pgr.export_plugins() def get_reg_docgens(self): """ Return list of registered docgen """ return self.__pgr.docgen_plugins() def get_reg_general(self, category=None): """ Return list of registered general libs """ return self.__pgr.general_plugins(category) def load_plugin_category(self, category): """ Make sure all plugins of a type are loaded. """ for plugin in self.__pgr.general_plugins(category): if not self.is_loaded(plugin): self.load_plugin(plugin) def get_plugin_data(self, category): """ Gets all of the data from general plugins of type category. plugin.data may be a single item, an iterable, or a callable. >>> PLUGMAN.get_plugin_data('CSS') <a list of raw data items> """ retval = [] data = None for plugin in self.__pgr.general_plugins(category): data = plugin.data try: iter(data) retval.extend(data) except: retval.append(data) return retval def process_plugin_data(self, category): """ Gathers all of the data from general plugins of type category, and pass it to a single process function from one of those plugins. >>> PLUGMAN.process_plugin_data('CSS') <a list of processed data items> """ retval = [] data = None process = None for plugin in self.__pgr.general_plugins(category): if plugin.process is not None: mod = self.load_plugin(plugin) if hasattr(mod, plugin.process): process = getattr(mod, plugin.process) data = plugin.data if data: try: iter(data) retval.extend(data) except: retval.append(data) if process: return process(retval) return retval def get_import_plugins(self): """ Get the list of import plugins. :return: :class:`.ImportPlugin` (a list of ImportPlugin instances) """ ## TODO: would it not be better to remove ImportPlugin and use ## only PluginData, loading from module when importfunction needed? 
if self.__import_plugins == []: #The module still needs to be imported for pdata in self.get_reg_importers(): if pdata.id in config.get("plugin.hiddenplugins"): continue mod = self.load_plugin(pdata) if mod: imp = ImportPlugin(name=pdata.name, description = pdata.description, import_function = getattr(mod, pdata.import_function), extension = pdata.extension) self.__import_plugins.append(imp) return self.__import_plugins def get_export_plugins(self): """ Get the list of export plugins. :return: :class:`.ExportPlugin` (a list of ExportPlugin instances) """ ## TODO: would it not be better to remove ExportPlugin and use ## only PluginData, loading from module when export/options needed? if self.__export_plugins == []: #The modules still need to be imported for pdata in self.get_reg_exporters(): if pdata.id in config.get("plugin.hiddenplugins"): continue mod = self.load_plugin(pdata) if mod: options = None if (pdata.export_options and hasattr(mod, pdata.export_options)): options = getattr(mod, pdata.export_options) exp = ExportPlugin(name=pdata.name_accell, description = pdata.description, export_function = getattr(mod, pdata.export_function), extension = pdata.extension, config = (pdata.export_options_title, options)) self.__export_plugins.append(exp) return self.__export_plugins def get_docgen_plugins(self): """ Get the list of docgen plugins. :return: :class:`.DocGenPlugin` (a list of DocGenPlugin instances) """ ## TODO: would it not be better to return list of plugindata, and only ## import those docgen that will then actuallly be needed? ## So, only do import when docgen.get_basedoc() is requested if self.__docgen_plugins == []: #The modules still need to be imported hiddenplugins = config.get("plugin.hiddenplugins") for pdata in self.get_reg_docgens(): if pdata.id in hiddenplugins: continue mod = self.load_plugin(pdata) if mod: oclass = None if pdata.optionclass: oclass = getattr(mod, pdata.optionclass) dgp = DocGenPlugin(name=pdata.name, description = pdata.description, basedoc = getattr(mod, pdata.docclass), paper = pdata.paper, style = pdata.style, extension = pdata.extension, docoptclass = oclass, basedocname = pdata.docclass ) self.__docgen_plugins.append(dgp) return self.__docgen_plugins def get_docgen_names(self): """ Get the list of docgen plugin names. :return: a list of :class:`.DocGenPlugin` names """ if self.__docgen_names == []: hiddenplugins = config.get("plugin.hiddenplugins") for pdata in self.get_reg_docgens(): if pdata.id not in hiddenplugins: self.__docgen_names.append(pdata.docclass) return self.__docgen_names def register_option(self, option, guioption): """ Register an external option. Register a mapping from option to guioption for an option that is not native to Gramps but provided by the plugin writer. This should typically be called during initialisation of a :class:`.ReportOptions` class. :param option: the option class :type option: class that inherits from gen.plug.menu.Option :param guioption: the gui-option class :type guioption: class that inherits from Gtk.Widget. """ self.__external_opt_dict[option] = guioption; def __purge_failed(self): """ Purge the failed plugins from the corresponding lists. 
""" failed_module_names = [ os.path.splitext(os.path.basename(filename))[0] for filename, msg, pdata in self.__failmsg_list ] self.__export_plugins[:] = [ item for item in self.__export_plugins if item.get_module_name() not in failed_module_names ][:] self.__import_plugins[:] = [ item for item in self.__import_plugins if item.get_module_name() not in failed_module_names ][:] self.__docgen_plugins[:] = [ item for item in self.__docgen_plugins if item.get_module_name() not in failed_module_names ][:]
sam-m888/gprime
gprime/plug/_manager.py
Python
gpl-2.0
23,952
[ "Brian" ]
53d297838643ef40a56f1b668d2a9f20dc9d30676a5a239a2d7ae0e95d232f93
############################################################################## # adaptiveMD: A Python Framework to Run Adaptive Molecular Dynamics (MD) # Simulations on HPC Resources # Copyright 2017 FU Berlin and the Authors # # Authors: Jan-Hendrik Prinz # Contributors: # # `adaptiveMD` is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as # published by the Free Software Foundation, either version 2.1 # of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with MDTraj. If not, see <http://www.gnu.org/licenses/>. ############################################################################## from engine import Engine, Trajectory, Frame, TrajectoryGenerationTask, \ TrajectoryExtensionTask
thempel/adaptivemd
adaptivemd/engine/__init__.py
Python
lgpl-2.1
1,120
[ "MDTraj" ]
0e0d42e9753d0739863674572a52f64e8789ba2854ec560ab5baf58df4155600
# Copyright (C) 2010-2018 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.


def vtf_pid_map(system, types='all'):
    """
    Generates a VTF particle index map to ESPResSo ``id``.
    This fills the gap for particle IDs as required by VMD.

    Parameters
    ----------
    system: espressomd.System() object
    types : :obj:`str`
        Specifies the particle types. The id mapping depends on which
        particles are going to be printed. This should be the same as
        the one used in writevsf() and writevcf().

    Returns
    -------
    dict:
        A dictionary where the values are the VTF indices and the keys
        are the ESPResSo particle ``id``
    """
    if not hasattr(types, '__iter__'):
        types = [types]
    if types == "all":
        types = [types]
    id_to_write = []
    for p in system.part:
        for t in types:
            if p.type == t or t == "all":
                id_to_write.append(p.id)
    return dict(zip(id_to_write, range(len(id_to_write))))


def writevsf(system, fp, types='all'):
    """
    writes a VSF (VTF Structure Format) to a file.
    This can be used to write the header of a VTF file.

    Parameters
    ----------
    system: espressomd.System() object
    types : :obj:`str`
        Specifies the particle types. The string 'all' will write all particles
    fp : file
        File pointer to write to.

    """
    vtf_index = vtf_pid_map(system, types)
    fp.write("unitcell {} {} {}\n".format(*(system.box_l)))

    for pid, vtf_id, in vtf_index.items():
        fp.write("atom {} radius 1 name {} type {} \n".format(
            vtf_id, system.part[pid].type, system.part[pid].type))
    for pid, vtf_id, in vtf_index.items():
        for b in system.part[pid].bonds:
            if (system.part[b[1]].id in vtf_index):
                fp.write("bond {}:{}\n".format(
                    vtf_id, vtf_index[system.part[b[1]].id]))


def writevcf(system, fp, types='all'):
    """
    writes a VCF (VTF Coordinate Format) to a file.
    This can be used to write a timestep to a VTF file.

    Parameters
    ----------
    system: espressomd.System() object
    types : :obj:`str`
        Specifies the particle types. The string 'all' will write all particles
    fp : file
        File pointer to write to.

    """
    vtf_index = vtf_pid_map(system, types)
    fp.write("\ntimestep indexed\n")
    for pid, vtf_id, in vtf_index.items():
        fp.write("{} {} {} {}\n".format(vtf_id, *(system.part[pid].pos)))
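A short usage sketch for the two writers above; the System construction, box size, and particle positions are placeholders, and it assumes a working ESPResSo build where this module is importable as espressomd.io.writer.vtf.

import espressomd
from espressomd.io.writer import vtf

# Toy system; box size, positions, and types are arbitrary placeholders.
system = espressomd.System(box_l=[10.0, 10.0, 10.0])
system.part.add(pos=[1.0, 1.0, 1.0], type=0)
system.part.add(pos=[2.0, 2.0, 2.0], type=0)

with open("trajectory.vtf", "w") as fp:
    vtf.writevsf(system, fp)  # structure block: unitcell, atoms, bonds
    vtf.writevcf(system, fp)  # one indexed coordinate block per call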
hmenke/espresso
src/python/espressomd/io/writer/vtf.py
Python
gpl-3.0
3,331
[ "ESPResSo", "VMD" ]
3324225107efccbd7474b6f6e82cab98b342ec64f7fbc3c604122cc929d7e450
# -*- coding: utf-8 -*- # # This file is part of cclib (http://cclib.github.io), a library for parsing # and interpreting the results of computational chemistry packages. # # Copyright (C) 2008-2014, the cclib development team # # The library is free software, distributed under the terms of # the GNU Lesser General Public version 2.1 or later. You should have # received a copy of the license along with cclib. You can also access # the full license online at http://www.gnu.org/copyleft/lgpl.html. """Parser for Psi3 and Psi4 output files""" import re import numpy from . import logfileparser from . import utils class Psi(logfileparser.Logfile): """A Psi log file.""" def __init__(self, *args, **kwargs): # Call the __init__ method of the superclass super(Psi, self).__init__(logname="Psi", *args, **kwargs) self.package = "Psi4" def __str__(self): """Return a string representation of the object.""" return "Psi log file %s" % (self.filename) def __repr__(self): """Return a representation of the object.""" return 'Psi("%s")' % (self.filename) def before_parsing(self): # There are some major differences between the output of Psi3 and Psi4, # so it will be useful to register which one we are dealing with. self.version = None # This is just used to track which part of the output we are in for Psi4, # with changes triggered by ==> things like this <== (Psi3 does not have this) self.section = None def normalisesym(self, label): """Use standard symmetry labels instead of NWChem labels. To normalise: (1) If label is one of [SG, PI, PHI, DLTA], replace by [sigma, pi, phi, delta] (2) replace any G or U by their lowercase equivalent >>> sym = NWChem("dummyfile").normalisesym >>> labels = ['A1', 'AG', 'A1G', "SG", "PI", "PHI", "DLTA", 'DLTU', 'SGG'] >>> map(sym, labels) ['A1', 'Ag', 'A1g', 'sigma', 'pi', 'phi', 'delta', 'delta.u', 'sigma.g'] """ # FIXME if necessary return label def extract(self, inputfile, line): """Extract information from the file object inputfile.""" # The version should always be detected. if "PSI3: An Open-Source Ab Initio" in line: self.version = 3 if "PSI4: An Open-Source Ab Initio" in line: self.version = 4 # This will automatically change the section attribute for Psi4, when encountering # a line that <== looks like this ==>, to whatever is in between. if (line.strip()[:3] == "==>") and (line.strip()[-3:] == "<=="): self.section = line.strip()[4:-4] if self.section == "DFT Potential": self.theory = "DFT" else: self.theory = "HF" # Psi3 print the coordinates in several configurations, and we will parse the # the canonical coordinates system in Angstroms as the first coordinate set, # although ir is actually somewhere later in the input, after basis set, etc. # We can also get or verify he number of atoms and atomic numbers from this block. 
if (self.version == 3) and (line.strip() == "-Geometry in the canonical coordinate system (Angstrom):"): self.skip_lines(inputfile, ['header', 'd']) coords = [] numbers = [] line = next(inputfile) while line.strip(): element = line.split()[0] numbers.append(self.table.number[element]) x = float(line.split()[1]) y = float(line.split()[2]) z = float(line.split()[3]) coords.append([x,y,z]) line = next(inputfile) self.set_attribute('natom', len(coords)) self.set_attribute('atomnos', numbers) if not hasattr(self, 'atomcoords'): self.atomcoords = [] self.atomcoords.append(coords) # ==> Geometry <== # # Molecular point group: c2h # Full point group: C2h # # Geometry (in Angstrom), charge = 0, multiplicity = 1: # # Center X Y Z # ------------ ----------------- ----------------- ----------------- # C -1.415253322400 0.230221785400 0.000000000000 # C 1.415253322400 -0.230221785400 0.000000000000 # ... # if (self.section == "Geometry") and ("Geometry (in Angstrom), charge" in line): assert line.split()[3] == "charge" charge = int(line.split()[5].strip(',')) self.set_attribute('charge', charge) assert line.split()[6] == "multiplicity" mult = int(line.split()[8].strip(':')) self.set_attribute('mult', mult) self.skip_line(inputfile, "blank") line = next(inputfile) # Usually there is the header and dashes, but, for example, the coordinates # printed when a geometry optimization finishes do not have it. if line.split()[0] == "Center": self.skip_line(inputfile, "dashes") line = next(inputfile) elements = [] coords = [] while line.strip(): el, x, y, z = line.split() elements.append(el) coords.append([float(x), float(y), float(z)]) line = next(inputfile) self.set_attribute('atomnos', [self.table.number[el] for el in elements]) self.natom = len(elements) if not hasattr(self, 'atomcoords'): self.atomcoords = [] # This condition discards any repeated coordinates that Psi print. For example, # geometry optimizations will print the coordinates at the beginning of and SCF # section and also at the start of the gradient calculation. if len(self.atomcoords) == 0 or self.atomcoords[-1] != coords: self.atomcoords.append(coords) # In Psi3 there are these two helpful sections. if (self.version == 3) and (line.strip() == '-SYMMETRY INFORMATION:'): line = next(inputfile) while line.strip(): if "Number of atoms" in line: self.set_attribute('natom', int(line.split()[-1])) line = next(inputfile) if (self.version == 3) and (line.strip() == "-BASIS SET INFORMATION:"): line = next(inputfile) while line.strip(): if "Number of AO" in line: self.set_attribute('nbasis', int(line.split()[-1])) line = next(inputfile) # Psi4 repeats the charge and multiplicity after the geometry. if (self.section == "Geometry") and (line[2:16].lower() == "charge ="): charge = int(line.split()[-1]) self.set_attribute('charge', charge) if (self.section == "Geometry") and (line[2:16].lower() == "multiplicity ="): mult = int(line.split()[-1]) self.set_attribute('mult', mult) # In Psi3, the section with the contraction scheme can be used to infer atombasis. 
if (self.version == 3) and line.strip() == "-Contraction Scheme:": self.skip_lines(inputfile, ['header', 'd']) indices = [] line = next(inputfile) while line.strip(): shells = line.split('//')[-1] expression = shells.strip().replace(' ', '+') expression = expression.replace('s', '*1') expression = expression.replace('p', '*3') expression = expression.replace('d', '*6') nfuncs = eval(expression) if len(indices) == 0: indices.append(range(nfuncs)) else: start = indices[-1][-1] + 1 indices.append(range(start, start+nfuncs)) line = next(inputfile) self.set_attribute('atombasis', indices) # In Psi3, the integrals program prints useful information when invoked. if (self.version == 3) and (line.strip() == "CINTS: An integrals program written in C"): self.skip_lines(inputfile, ['authors', 'd', 'b', 'b']) line = next(inputfile) assert line.strip() == "-OPTIONS:" while line.strip(): line = next(inputfile) line = next(inputfile) assert line.strip() == "-CALCULATION CONSTANTS:" while line.strip(): if "Number of atoms" in line: natom = int(line.split()[-1]) self.set_attribute('natom', natom) if "Number of atomic orbitals" in line: nbasis = int(line.split()[-1]) self.set_attribute('nbasis', nbasis) line = next(inputfile) # In Psi3, this part contains alot of important data pertaining to the SCF, but not only: if (self.version == 3) and (line.strip() == "CSCF3.0: An SCF program written in C"): self.skip_lines(inputfile, ['b', 'authors', 'b', 'd', 'b', 'mult', 'mult_comment', 'b']) line = next(inputfile) while line.strip(): if line.split()[0] == "multiplicity": mult = int(line.split()[-1]) self.set_attribute('mult', mult) if line.split()[0] == "charge": charge = int(line.split()[-1]) self.set_attribute('charge', charge) if line.split()[0] == "convergence": conv = float(line.split()[-1]) line = next(inputfile) if not hasattr(self, 'scftargets'): self.scftargets = [] self.scftargets.append([conv]) # The printout for Psi4 has a more obvious trigger for the SCF parameter printout. if (self.section == "Algorithm") and (line.strip() == "==> Algorithm <=="): self.skip_line(inputfile, 'blank') line = next(inputfile) while line.strip(): if "Energy threshold" in line: etarget = float(line.split()[-1]) if "Density threshold" in line: dtarget = float(line.split()[-1]) line = next(inputfile) if not hasattr(self, "scftargets"): self.scftargets = [] self.scftargets.append([etarget, dtarget]) # This section prints contraction information before the atomic basis set functions and # is a good place to parse atombasis indices as well as atomnos. However, the section this line # is in differs between HF and DFT outputs. # # -Contraction Scheme: # Atom Type All Primitives // Shells: # ------ ------ -------------------------- # 1 C 6s 3p // 2s 1p # 2 C 6s 3p // 2s 1p # 3 C 6s 3p // 2s 1p # ... if self.section == "Primary Basis" : if line[2:12] == "Basis Set:" : self.basisname = line.split()[2] if (self.section == "Primary Basis" or self.section == "DFT Potential") and line.strip() == "-Contraction Scheme:": self.skip_lines(inputfile, ['headers', 'd']) atomnos = [] atombasis = [] atombasis_pos = 0 line = next(inputfile) while line.strip(): element = line.split()[1] atomnos.append(self.table.number[element]) # To count the number of atomic orbitals for the atom, sum up the orbitals # in each type of shell, times the numbers of shells. 
Currently, we assume # the multiplier is a single digit and that there are only s and p shells, # which will need to be extended later when considering larger basis sets, # with corrections for the cartesian/spherical cases. ao_count = 0 shells = line.split('//')[1].split() for s in shells: count, type = s multiplier = 3*(type=='p') or 1 ao_count += multiplier*int(count) if len(atombasis) > 0: atombasis_pos = atombasis[-1][-1] + 1 atombasis.append(list(range(atombasis_pos, atombasis_pos+ao_count))) line = next(inputfile) self.set_attribute('natom', len(atomnos)) self.set_attribute('atomnos', atomnos) self.set_attribute('atombasis', atombasis) # The atomic basis set is straightforward to parse, but there are some complications # when symmetry is used, because in that case Psi4 only print the symmetry-unique atoms, # and the list of symmetry-equivalent ones is not printed. Therefore, for simplicity here # when an atomic is missing (atom indices are printed) assume the atomic orbitals of the # last atom of the same element before it. This might not work if a mixture of basis sets # is used somehow... but it should cover almost all cases for now. # # Note that Psi also print normalized coefficients (details below). # # ==> AO Basis Functions <== # # [ STO-3G ] # spherical # **** # C 1 # S 3 1.00 # 71.61683700 2.70781445 # 13.04509600 2.61888016 # ... if (self.section == "AO Basis Functions") and (line.strip() == "==> AO Basis Functions <=="): def get_symmetry_atom_basis(gbasis): """Get symmetry atom by replicating the last atom in gbasis of the same element.""" missing_index = len(gbasis) missing_atomno = self.atomnos[missing_index] ngbasis = len(gbasis) last_same = ngbasis - self.atomnos[:ngbasis][::-1].index(missing_atomno) - 1 return gbasis[last_same] dfact = lambda n: (n <= 0) or n * dfact(n-2) def get_normalization_factor(exp, lx, ly, lz): norm_s = (2*exp/numpy.pi)**0.75 if lx + ly + lz > 0: nom = (4*exp)**((lx+ly+lz)/2.0) den = numpy.sqrt(dfact(2*lx-1) * dfact(2*ly-1) * dfact(2*lz-1)) return norm_s * nom / den else: return norm_s self.skip_lines(inputfile, ['b', 'basisname']) line = next(inputfile) spherical = line.strip() == "spherical" if hasattr(self, 'spherical_basis'): assert self.spherical_basis == spherical else: self.spherical_basis = spherical gbasis = [] self.skip_line(inputfile, 'stars') line = next(inputfile) while line.strip(): element, index = line.split() atomno = self.table.number[element] index = int(index) # This is the code that adds missing atoms when symmetry atoms are excluded # from the basis set printout. Again, this will work only if all atoms of # the same element use the same basis set. while index > len(gbasis) + 1: gbasis.append(get_symmetry_atom_basis(gbasis)) gbasis.append([]) line = next(inputfile) while line.find("*") == -1: # The shell type and primitive count is in the first line. shell_type, nprimitives, smthg = line.split() nprimitives = int(nprimitives) # Get the angular momentum for this shell type. momentum = { 'S' : 0, 'P' : 1, 'D' : 2, 'F' : 3, 'G' : 4 }[shell_type.upper()] # Read in the primitives. primitives_lines = [next(inputfile) for i in range(nprimitives)] primitives = [list(map(float, pl.split())) for pl in primitives_lines] # Un-normalize the coefficients. Psi prints the normalized coefficient # of the highest polynomial, namely XX for D orbitals, XXX for F, and so on. 
for iprim, prim in enumerate(primitives): exp, coef = prim coef = coef / get_normalization_factor(exp, momentum, 0, 0) primitives[iprim] = [exp, coef] primitives = [tuple(p) for p in primitives] shell = [shell_type, primitives] gbasis[-1].append(shell) line = next(inputfile) line = next(inputfile) # We will also need to add symmetry atoms that are missing from the input # at the end of this block, if the symmetry atoms are last. while len(gbasis) < self.natom: gbasis.append(get_symmetry_atom_basis(gbasis)) self.gbasis = gbasis # A block called 'Calculation Information' prints these before starting the SCF. if (self.section == "Pre-Iterations") and ("Number of atoms" in line): natom = int(line.split()[-1]) self.set_attribute('natom', natom) if (self.section == "Pre-Iterations") and ("Number of atomic orbitals" in line): nbasis = int(line.split()[-1]) self.set_attribute('nbasis', nbasis) # ==> Iterations <== # Psi3 converges just the density elements, although it reports in the iterations # changes in the energy as well as the DIIS error. psi3_iterations_header = "iter total energy delta E delta P diiser" if (self.version == 3) and (line.strip() == psi3_iterations_header): if not hasattr(self, 'scfvalues'): self.scfvalues = [] self.scfvalues.append([]) line = next(inputfile) while line.strip(): ddensity = float(line.split()[-2]) self.scfvalues[-1].append([ddensity]) line = next(inputfile) # Psi4 converges both the SCF energy and density elements and reports both in the # iterations printout. However, the default convergence scheme involves a density-fitted # algorithm for efficiency, and this is often followed by a something with exact electron # repulsion integrals. In that case, there are actually two convergence cycles performed, # one for the density-fitted algorithm and one for the exact one, and the iterations are # printed in two blocks separated by some set-up information. if (self.section == "Iterations") and (line.strip() == "==> Iterations <=="): if not hasattr(self, 'scfvalues'): self.scfvalues = [] self.skip_line(inputfile, 'blank') header = next(inputfile) assert (header.strip() == "Total Energy Delta E RMS |[F,P]|" \ or header.strip() == "Total Energy Delta E Density RMS") scfvals = [] self.skip_line(inputfile, 'blank') line = next(inputfile) while line.strip() != "==> Post-Iterations <==": if line.strip() and line.split()[0] in ["@DF-RHF", "@RHF", "@DF-RKS", "@RKS"]: denergy = float(line.split()[4]) ddensity = float(line.split()[5]) scfvals.append([denergy, ddensity]) line = next(inputfile) self.section = "Post-Iterations" self.scfvalues.append(scfvals) # This section, from which we parse molecular orbital symmetries and # orbital energies, is quite similar for both Psi3 and Psi4, and in fact # the format for orbtials is the same, although the headers and spacers # are a bit different. Let's try to get both parsed with one code block. # # Here is how the block looks like for Psi4: # # Orbital Energies (a.u.) # ----------------------- # # Doubly Occupied: # # 1Bu -11.040586 1Ag -11.040524 2Bu -11.031589 # 2Ag -11.031589 3Bu -11.028950 3Ag -11.028820 # (...) # 15Ag -0.415620 1Bg -0.376962 2Au -0.315126 # 2Bg -0.278361 3Bg -0.222189 # # Virtual: # # 3Au 0.198995 4Au 0.268517 4Bg 0.308826 # 5Au 0.397078 5Bg 0.521759 16Ag 0.565017 # (...) # 24Ag 0.990287 24Bu 1.027266 25Ag 1.107702 # 25Bu 1.124938 # # The case is different in the trigger string. if "orbital energies (a.u.)" in line.lower(): # If this is Psi4, we will be in the appropriate section. 
assert (self.version == 3) or (self.section == "Post-Iterations") self.moenergies = [[]] self.mosyms = [[]] # Psi4 has dashes under the trigger line, but Psi3 did not. if self.version == 4: self.skip_line(inputfile, 'dashes') self.skip_line(inputfile, 'blank') # Both versions have this case insensisitive substring. doubly = next(inputfile) assert "doubly occupied" in doubly.lower() # Psi4 now has a blank line, Psi3 does not. if self.version == 4: self.skip_line(inputfile, 'blank') line = next(inputfile) while line.strip(): for i in range(len(line.split())//2): self.mosyms[0].append(line.split()[i*2][-2:]) self.moenergies[0].append(line.split()[i*2+1]) line = next(inputfile) # The last orbital energy here represented the HOMO. self.homos = [len(self.moenergies[0])-1] # Different numbers of blank lines in Psi3 and Psi4. if self.version == 3: self.skip_line(inputfile, 'blank') # The header for virtual orbitals is different for the two versions. unoccupied = next(inputfile) if self.version == 3: assert unoccupied.strip() == "Unoccupied orbitals" else: assert unoccupied.strip() == "Virtual:" # Psi4 now has a blank line, Psi3 does not. if self.version == 4: self.skip_line(inputfile, 'blank') line = next(inputfile) while line.strip(): for i in range(len(line.split())//2): self.mosyms[0].append(line.split()[i*2][-2:]) self.moenergies[0].append(line.split()[i*2+1]) line = next(inputfile) # Both Psi3 and Psi4 print the final SCF energy right after the orbital energies, # but the label is different. Psi4 also does DFT, and the label is also different in that case. if (self.version == 3 and "* SCF total energy" in line) or \ (self.section == "Post-Iterations" and ("@DF-RHF Final Energy:" in line or "@DF-RKS Final Energy" in line)): e = float(line.split()[-1]) if not hasattr(self, 'scfenergies'): self.scfenergies = [] self.scfenergies.append(utils.convertor(e, 'hartree', 'eV')) # ==> Molecular Orbitals <== # # 1 2 3 4 5 # # 1 0.7014827 0.7015412 0.0096801 0.0100168 0.0016438 # 2 0.0252630 0.0251793 -0.0037890 -0.0037346 0.0016447 # ... # 59 0.0000133 -0.0000067 0.0000005 -0.0047455 -0.0047455 # 60 0.0000133 0.0000067 0.0000005 0.0047455 -0.0047455 # # Ene -11.0288198 -11.0286067 -11.0285837 -11.0174766 -11.0174764 # Sym Ag Bu Ag Bu Ag # Occ 2 2 2 2 2 # # # 11 12 13 14 15 # # 1 0.1066946 0.1012709 0.0029709 0.0120562 0.1002765 # 2 -0.2753689 -0.2708037 -0.0102079 -0.0329973 -0.2790813 # ... # if (self.section == "Molecular Orbitals") and (line.strip() == "==> Molecular Orbitals <=="): self.skip_line(inputfile, 'blank') mocoeffs = [] indices = next(inputfile) while indices.strip(): indices = [int(i) for i in indices.split()] if len(mocoeffs) < indices[-1]: for i in range(len(indices)): mocoeffs.append([]) else: assert len(mocoeffs) == indices[-1] self.skip_line(inputfile, 'blank') line = next(inputfile) while line.strip(): iao = int(line.split()[0]) coeffs = [float(c) for c in line.split()[1:]] for i,c in enumerate(coeffs): mocoeffs[indices[i]-1].append(c) line = next(inputfile) energies = next(inputfile) symmetries = next(inputfile) occupancies = next(inputfile) self.skip_lines(inputfile, ['b', 'b']) indices = next(inputfile) if not hasattr(self, 'mocoeffs'): self.mocoeffs = [] self.mocoeffs.append(mocoeffs) # The formats for Mulliken and Lowdin atomic charges are the same, just with # the name changes, so use the same code for both. # # Properties computed using the SCF density density matrix # Mulliken Charges: (a.u.) 
# Center Symbol Alpha Beta Spin Total # 1 C 2.99909 2.99909 0.00000 0.00182 # 2 C 2.99909 2.99909 0.00000 0.00182 # ... for pop_type in ["Mulliken", "Lowdin"]: if line.strip() == "%s Charges: (a.u.)" % pop_type: if not hasattr(self, 'atomcharges'): self.atomcharges = {} header = next(inputfile) line = next(inputfile) while not line.strip(): line = next(inputfile) charges = [] while line.strip(): ch = float(line.split()[-1]) charges.append(ch) line = next(inputfile) self.atomcharges[pop_type.lower()] = charges mp_trigger = "MP2 Total Energy (a.u.)" if line.strip()[:len(mp_trigger)] == mp_trigger: self.theory = "MP2" mpenergy = utils.convertor(float(line.split()[-1]), 'hartree', 'eV') if not hasattr(self, 'mpenergies'): self.mpenergies = [] self.mpenergies.append([mpenergy]) if "DF-MP2 Energies" in line: self.theory = "MP2" line = next(inputfile) line = next(inputfile) line = next(inputfile) line = next(inputfile) line = next(inputfile) line = next(inputfile) line = next(inputfile) mpenergy = utils.convertor(float(line.split()[3]), 'hartree', 'eV') if not hasattr(self, 'mpenergies'): self.mpenergies = [] self.mpenergies.append([mpenergy]) # Note this is just a start and needs to be modified for CCSD(T), etc. ccsd_trigger = "* CCSD total energy" if line.strip()[:len(ccsd_trigger)] == ccsd_trigger: self.theory = "CCSD" ccsd_energy = utils.convertor(float(line.split()[-1]), 'hartree', 'eV') if not hasattr(self, "ccenergis"): self.ccenergies = [] self.ccenergies.append(ccsd_energy) # The geometry convergence targets and values are printed in a table, with the legends # describing the convergence annotation. Probably exact slicing of the line needs # to be done in order to extract the numbers correctly. If there are no values for # a paritcular target it means they are not used (marked also with an 'o'), and in this case # we will set a value of numpy.inf so that any value will be smaller. # # ==> Convergence Check <== # # Measures of convergence in internal coordinates in au. # Criteria marked as inactive (o), active & met (*), and active & unmet ( ). # --------------------------------------------------------------------------------------------- # Step Total Energy Delta E MAX Force RMS Force MAX Disp RMS Disp # --------------------------------------------------------------------------------------------- # Convergence Criteria 1.00e-06 * 3.00e-04 * o 1.20e-03 * o # --------------------------------------------------------------------------------------------- # 2 -379.77675264 -7.79e-03 1.88e-02 4.37e-03 o 2.29e-02 6.76e-03 o ~ # --------------------------------------------------------------------------------------------- # if (self.section == "Convergence Check") and line.strip() == "==> Convergence Check <==": self.skip_lines(inputfile, ['b', 'units', 'comment', 'dash+tilde', 'header', 'dash+tilde']) # These are the position in the line at which numbers should start. starts = [27, 41, 55, 69, 83] criteria = next(inputfile) geotargets = [] for istart in starts: if criteria[istart:istart+9].strip(): geotargets.append(float(criteria[istart:istart+9])) else: geotargets.append(numpy.inf) self.skip_line(inputfile, 'dashes') values = next(inputfile) geovalues = [] for istart in starts: if values[istart:istart+9].strip(): geovalues.append(float(values[istart:istart+9])) # This assertion may be too restrictive, but we haven't seen the geotargets change. # If such an example comes up, update the value since we're interested in the last ones. 
if not hasattr(self, 'geotargets'): self.geotargets = geotargets else: assert self.geotargets == geotargets if not hasattr(self, 'geovalues'): self.geovalues = [] self.geovalues.append(geovalues) # This message signals a converged optimization, in which case we want # to append the index for this step to optdone, which should be equal # to the number of geovalues gathered so far. if line.strip() == "**** Optimization is complete! ****": if not hasattr(self, 'optdone'): self.optdone = [] self.optdone.append(len(self.geovalues)) # This message means that optimization has stopped for some reason, but we # still want optdone to exist in this case, although it will be an empty list. if line.strip() == "Optimizer: Did not converge!": if not hasattr(self, 'optdone'): self.optdone = [] # The reference point at which properties are evaluated in Psi4 is explicitely stated, # so we can save it for later. It is not, however, a part of the Properties section, # but it appears before it and also in other places where properies that might depend # on it are printed. # # Properties will be evaluated at 0.000000, 0.000000, 0.000000 Bohr # if (self.version == 4) and ("Properties will be evaluated at" in line.strip()): self.reference = numpy.array([float(x.strip(',')) for x in line.split()[-4:-1]]) assert line.split()[-1] == "Bohr" self.reference = utils.convertor(self.reference, 'bohr', 'Angstrom') else: self.reference = [0.0]*3 # The properties section print the molecular dipole moment: # # ==> Properties <== # # #Properties computed using the SCF density density matrix # Nuclear Dipole Moment: (a.u.) # X: 0.0000 Y: 0.0000 Z: 0.0000 # # Electronic Dipole Moment: (a.u.) # X: 0.0000 Y: 0.0000 Z: 0.0000 # # Dipole Moment: (a.u.) # X: 0.0000 Y: 0.0000 Z: 0.0000 Total: 0.0000 # if (self.section == "Properties") and line.strip() == "Dipole Moment: (a.u.)": line = next(inputfile) dipole = numpy.array([float(line.split()[1]), float(line.split()[3]), float(line.split()[5])]) dipole = utils.convertor(dipole, "ebohr", "Debye") if not hasattr(self, 'moments'): self.moments = [self.reference, dipole] else: try: assert numpy.all(self.moments[1] == dipole) except AssertionError: self.logger.warning('Overwriting previous multipole moments with new values') self.logger.warning('This could be from post-HF properties or geometry optimization') self.moments = [self.reference, dipole] # Higher multipole moments are printed separately, on demand, in lexicographical order. # # Multipole Moments: # # ------------------------------------------------------------------------------------ # Multipole Electric (a.u.) Nuclear (a.u.) Total (a.u.) # ------------------------------------------------------------------------------------ # # L = 1. Multiply by 2.5417462300 to convert to Debye # Dipole X : 0.0000000 0.0000000 0.0000000 # Dipole Y : 0.0000000 0.0000000 0.0000000 # Dipole Z : 0.0000000 0.0000000 0.0000000 # # L = 2. Multiply by 1.3450341749 to convert to Debye.ang # Quadrupole XX : -1535.8888701 1496.8839996 -39.0048704 # Quadrupole XY : -11.5262958 11.4580038 -0.0682920 # ... # if line.strip() == "Multipole Moments:": self.skip_lines(inputfile, ['b', 'd', 'header', 'd', 'b']) # The reference used here should have been printed somewhere # before the properties and parsed above. 
moments = [self.reference] line = next(inputfile) while "----------" not in line.strip(): rank = int(line.split()[2].strip('.')) multipole = [] line = next(inputfile) while line.strip(): value = float(line.split()[-1]) fromunits = "ebohr" + (rank>1)*("%i" % rank) tounits = "Debye" + (rank>1)*".ang" + (rank>2)*("%i" % (rank-1)) value = utils.convertor(value, fromunits, tounits) multipole.append(value) line = next(inputfile) multipole = numpy.array(multipole) moments.append(multipole) line = next(inputfile) if not hasattr(self, 'moments'): self.moments = moments else: for im,m in enumerate(moments): if len(self.moments) <= im: self.moments.append(m) else: assert numpy.all(self.moments[im] == m) # We can also get some higher moments in Psi3, although here the dipole is not printed # separately and the order is not lexicographical. However, the numbers seem # kind of strange -- the quadrupole seems to be traceless, although I'm not sure # whether the standard transformation has been used. So, until we know what kind # of moment these are and how to make them raw again, we will only parse the dipole. # # -------------------------------------------------------------- # *** Electric multipole moments *** # -------------------------------------------------------------- # # CAUTION : The system has non-vanishing dipole moment, therefore # quadrupole and higher moments depend on the reference point. # # -Coordinates of the reference point (a.u.) : # x y z # -------------------- -------------------- -------------------- # 0.0000000000 0.0000000000 0.0000000000 # # -Electric dipole moment (expectation values) : # # mu(X) = -0.00000 D = -1.26132433e-43 C*m = -0.00000000 a.u. # mu(Y) = 0.00000 D = 3.97987832e-44 C*m = 0.00000000 a.u. # mu(Z) = 0.00000 D = 0.00000000e+00 C*m = 0.00000000 a.u. # |mu| = 0.00000 D = 1.32262368e-43 C*m = 0.00000000 a.u. # # -Components of electric quadrupole moment (expectation values) (a.u.) : # # Q(XX) = 10.62340220 Q(YY) = 1.11816843 Q(ZZ) = -11.74157063 # Q(XY) = 3.64633112 Q(XZ) = 0.00000000 Q(YZ) = 0.00000000 # if (self.version == 3) and line.strip() == "*** Electric multipole moments ***": self.skip_lines(inputfile, ['d', 'b', 'caution1', 'caution2', 'b']) coordinates = next(inputfile) assert coordinates.split()[-2] == "(a.u.)" self.skip_lines(inputfile, ['xyz', 'd']) line = next(inputfile) self.reference = numpy.array([float(x) for x in line.split()]) self.reference = utils.convertor(self.reference, 'bohr', 'Angstrom') self.skip_line(inputfile, "blank") line = next(inputfile) assert "Electric dipole moment" in line self.skip_line(inputfile, "blank") # Make sure to use the column that has the value in Debyes. dipole = [] for i in range(3): line = next(inputfile) dipole.append(float(line.split()[2])) if not hasattr(self, 'moments'): self.moments = [self.reference, dipole] else: assert self.moments[1] == dipole ## Harmonic frequencies. # ------------------------------------------------------------- # Computing second-derivative from gradients using projected, # symmetry-adapted, cartesian coordinates (fd_freq_1). # 74 gradients passed in, including the reference geometry. # Generating complete list of displacements from unique ones. # Operation 2 takes plus displacements of irrep Bg to minus ones. # Operation 3 takes plus displacements of irrep Au to minus ones. # Operation 2 takes plus displacements of irrep Bu to minus ones. 
# Irrep Harmonic Frequency # (cm-1) # ----------------------------------------------- # Au 137.2883 if line.strip() == 'Irrep Harmonic Frequency': vibsyms = [] vibfreqs = [] self.skip_lines(inputfile, ['(cm-1)', 'dashes']) ## The first section contains the symmetry of each normal ## mode and its frequency. line = next(inputfile) while '---' not in line: chomp = line.split() vibsym = chomp[0] vibfreq = Psi.parse_vibfreq(chomp[1]) vibsyms.append(vibsym) vibfreqs.append(vibfreq) line = next(inputfile) self.set_attribute('vibsyms', vibsyms) self.set_attribute('vibfreqs', vibfreqs) line = next(inputfile) assert line.strip() == '' line = next(inputfile) assert 'Normal Modes' in line line = next(inputfile) assert 'Molecular mass is' in line if hasattr(self, 'atommasses'): assert abs(float(line.split()[3]) - sum(self.atommasses)) < 1.0e-4 line = next(inputfile) assert line.strip() == 'Frequencies in cm^-1; force constants in au.' line = next(inputfile) assert line.strip() == '' line = next(inputfile) ## The second section contains the frequency, force ## constant, and displacement for each normal mode, along ## with the atomic masses. # Normal Modes (non-mass-weighted). # Molecular mass is 130.07825 amu. # Frequencies in cm^-1; force constants in au. # Frequency: 137.29 # Force constant: 0.0007 # X Y Z mass # C 0.000 0.000 0.050 12.000000 # C 0.000 0.000 0.050 12.000000 for vibfreq in self.vibfreqs: _vibfreq = Psi.parse_vibfreq(line[13:].strip()) assert abs(vibfreq - _vibfreq) < 1.0e-2 line = next(inputfile) # Can't do anything with this for now. assert 'Force constant:' in line line = next(inputfile) assert 'X Y Z mass' in line line = next(inputfile) if not hasattr(self, 'vibdisps'): self.vibdisps = [] normal_mode_disps = [] # for k in range(self.natom): while line.strip(): chomp = line.split() # Do nothing with this for now. atomsym = chomp[0] atomcoords = [float(x) for x in chomp[1:4]] # Do nothing with this for now. atommass = float(chomp[4]) normal_mode_disps.append(atomcoords) line = next(inputfile) self.vibdisps.append(normal_mode_disps) line = next(inputfile) @staticmethod def parse_vibfreq(vibfreq): """Imaginary frequencies are printed as '12.34i', rather than '-12.34'. """ is_imag = vibfreq[-1] == 'i' if is_imag: return -float(vibfreq[:-1]) else: return float(vibfreq) if __name__ == "__main__": import doctest, psiparser doctest.testmod(psiparser, verbose=False)
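A small illustrative check of the imaginary-frequency convention handled by the parse_vibfreq helper above; the import path simply mirrors this file's location in the repository.

from cclib.parser.psiparser import Psi

# Real frequencies parse as-is; a trailing 'i' marks an imaginary mode,
# which is returned as a negative wavenumber.
assert Psi.parse_vibfreq("137.29") == 137.29
assert Psi.parse_vibfreq("56.04i") == -56.04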
ChemSem/cclib
src/cclib/parser/psiparser.py
Python
lgpl-2.1
45,115
[ "NWChem", "Psi4", "cclib" ]
54bbb2a577ed38fb9ee1e1aad7f328d58d3200f09b7f920e6488101e8ba21e8e
from google.protobuf import text_format from .caffe import get_caffe_resolver from .errors import KaffeError, print_stderr from .layers import LayerAdapter, LayerType, NodeKind, NodeDispatch from .shapes import TensorShape class Node(object): def __init__(self, name, kind, layer=None): self.name = name self.kind = kind self.layer = LayerAdapter(layer, kind) if layer else None self.parents = [] self.children = [] self.data = None self.output_shape = None self.metadata = {} def add_parent(self, parent_node): assert parent_node not in self.parents self.parents.append(parent_node) if self not in parent_node.children: parent_node.children.append(self) def add_child(self, child_node): assert child_node not in self.children self.children.append(child_node) if self not in child_node.parents: child_node.parents.append(self) def get_only_parent(self): if len(self.parents) != 1: raise KaffeError('Node (%s) expected to have 1 parent. Found %s.' % (self, len(self.parents))) return self.parents[0] @property def parameters(self): if self.layer is not None: return self.layer.parameters return None def __str__(self): return '[%s] %s' % (self.kind, self.name) def __repr__(self): return '%s (0x%x)' % (self.name, id(self)) class Graph(object): def __init__(self, nodes=None, name=None): self.nodes = nodes or [] self.node_lut = {node.name: node for node in self.nodes} self.name = name def add_node(self, node): self.nodes.append(node) self.node_lut[node.name] = node def get_node(self, name): try: return self.node_lut[name] except KeyError: raise KaffeError('Layer not found: %s' % name) def get_input_nodes(self): return [node for node in self.nodes if len(node.parents) == 0] def get_output_nodes(self): return [node for node in self.nodes if len(node.children) == 0] def topologically_sorted(self): sorted_nodes = [] unsorted_nodes = list(self.nodes) temp_marked = set() perm_marked = set() def visit(node): if node in temp_marked: raise KaffeError('Graph is not a DAG.') if node in perm_marked: return temp_marked.add(node) for child in node.children: visit(child) perm_marked.add(node) temp_marked.remove(node) sorted_nodes.insert(0, node) while len(unsorted_nodes): visit(unsorted_nodes.pop()) return sorted_nodes def compute_output_shapes(self): sorted_nodes = self.topologically_sorted() for node in sorted_nodes: node.output_shape = TensorShape(*NodeKind.compute_output_shape(node)) def replaced(self, new_nodes): return Graph(nodes=new_nodes, name=self.name) def transformed(self, transformers): graph = self for transformer in transformers: graph = transformer(graph) if graph is None: raise KaffeError('Transformer failed: {}'.format(transformer)) assert isinstance(graph, Graph) return graph def __contains__(self, key): return key in self.node_lut def __str__(self): hdr = '{:<20} {:<30} {:>20} {:>20}'.format('Type', 'Name', 'Param', 'Output') s = [hdr, '-' * 94] for node in self.topologically_sorted(): # If the node has learned parameters, display the first one's shape. # In case of convolutions, this corresponds to the weights. data_shape = node.data[0].shape if node.data else '--' out_shape = node.output_shape or '--' s.append('{:<20} {:<30} {:>20} {:>20}'.format(node.kind, node.name, data_shape, tuple(out_shape))) return '\n'.join(s) class GraphBuilder(object): '''Constructs a model graph from a Caffe protocol buffer definition.''' def __init__(self, def_path, phase='test'): ''' def_path: Path to the model definition (.prototxt) data_path: Path to the model data (.caffemodel) phase: Either 'test' or 'train'. 
Used for filtering phase-specific nodes. ''' self.def_path = def_path self.phase = phase self.load() def load(self): '''Load the layer definitions from the prototxt.''' self.params = get_caffe_resolver().NetParameter() with open(self.def_path, 'rb') as def_file: text_format.Merge(def_file.read(), self.params) def filter_layers(self, layers): '''Filter out layers based on the current phase.''' phase_map = {0: 'train', 1: 'test'} filtered_layer_names = set() filtered_layers = [] for layer in layers: phase = self.phase if len(layer.include): phase = phase_map[layer.include[0].phase] if len(layer.exclude): phase = phase_map[1 - layer.include[0].phase] exclude = (phase != self.phase) # Dropout layers appear in a fair number of Caffe # test-time networks. These are just ignored. We'll # filter them out here. if (not exclude) and (phase == 'test'): exclude = (layer.type == LayerType.Dropout) if not exclude: filtered_layers.append(layer) # Guard against dupes. assert layer.name not in filtered_layer_names filtered_layer_names.add(layer.name) return filtered_layers def make_node(self, layer): '''Create a graph node for the given layer.''' kind = NodeKind.map_raw_kind(layer.type) if kind is None: raise KaffeError('Unknown layer type encountered: %s' % layer.type) # We want to use the layer's top names (the "output" names), rather than the # name attribute, which is more of readability thing than a functional one. # Other layers will refer to a node by its "top name". return Node(layer.name, kind, layer=layer) def make_input_nodes(self): ''' Create data input nodes. This method is for old-style inputs, where the input specification was not treated as a first-class layer in the prototext. Newer models use the "Input layer" type. ''' nodes = [Node(name, NodeKind.Data) for name in self.params.input] if len(nodes): input_dim = map(int, self.params.input_dim) if not input_dim: if len(self.params.input_shape) > 0: input_dim = map(int, self.params.input_shape[0].dim) else: raise KaffeError('Dimensions for input not specified.') for node in nodes: node.output_shape = tuple(input_dim) return nodes def build(self): ''' Builds the graph from the Caffe layer definitions. ''' # Get the layers layers = self.params.layers or self.params.layer # Filter out phase-excluded layers layers = self.filter_layers(layers) # Get any separately-specified input layers nodes = self.make_input_nodes() nodes += [self.make_node(layer) for layer in layers] # Initialize the graph graph = Graph(nodes=nodes, name=self.params.name) # Connect the nodes # # A note on layers and outputs: # In Caffe, each layer can produce multiple outputs ("tops") from a set of inputs # ("bottoms"). The bottoms refer to other layers' tops. The top can rewrite a bottom # (in case of in-place operations). Note that the layer's name is not used for establishing # any connectivity. It's only used for data association. By convention, a layer with a # single top will often use the same name (although this is not required). # # The current implementation only supports single-output nodes (note that a node can still # have multiple children, since multiple child nodes can refer to the single top's name). 
node_outputs = {} for layer in layers: node = graph.get_node(layer.name) for input_name in layer.bottom: assert input_name != layer.name parent_node = node_outputs.get(input_name) if (parent_node is None) or (parent_node == node): parent_node = graph.get_node(input_name) node.add_parent(parent_node) if len(layer.top)>1: raise KaffeError('Multiple top nodes are not supported.') for output_name in layer.top: if output_name == layer.name: # Output is named the same as the node. No further action required. continue # There are two possibilities here: # # Case 1: output_name refers to another node in the graph. # This is an "in-place operation" that overwrites an existing node. # This would create a cycle in the graph. We'll undo the in-placing # by substituting this node wherever the overwritten node is referenced. # # Case 2: output_name violates the convention layer.name == output_name. # Since we are working in the single-output regime, we will can rename it to # match the layer name. # # For both cases, future references to this top re-routes to this node. node_outputs[output_name] = node graph.compute_output_shapes() return graph class NodeMapper(NodeDispatch): def __init__(self, graph): self.graph = graph def map(self): nodes = self.graph.topologically_sorted() # Remove input nodes - we'll handle them separately. input_nodes = self.graph.get_input_nodes() nodes = [t for t in nodes if t not in input_nodes] # Decompose DAG into chains. chains = [] for node in nodes: attach_to_chain = None if len(node.parents) == 1: parent = node.get_only_parent() for chain in chains: if chain[-1] == parent: # Node is part of an existing chain. attach_to_chain = chain break if attach_to_chain is None: # Start a new chain for this node. attach_to_chain = [] chains.append(attach_to_chain) attach_to_chain.append(node) # Map each chain. mapped_chains = [] for chain in chains: mapped_chains.append(self.map_chain(chain)) return self.commit(mapped_chains) def map_chain(self, chain): return [self.map_node(node) for node in chain] def map_node(self, node): map_func = self.get_handler(node.kind, 'map') mapped_node = map_func(node) assert mapped_node is not None mapped_node.node = node return mapped_node def commit(self, mapped_chains): raise NotImplementedError('Must be implemented by subclass.')
lexionbear/mlmodels
tensorflow/libs/caffe-tensorflow/kaffe/graph.py
Python
mit
11,653
[ "VisIt" ]
c694be34f620ccb4bec88d85d41dc1b63d6166ca613412957bec66e901cf502f
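A minimal usage sketch for the GraphBuilder/Graph classes in the caffe-tensorflow record above. The prototxt path and the kaffe.graph import location are assumptions taken from the record's path field, not verified against the repository.

# Sketch only: build a kaffe Graph from a Caffe prototxt and print the layer table.
# 'deploy.prototxt' is a placeholder path, not part of the record.
from kaffe.graph import GraphBuilder

builder = GraphBuilder('deploy.prototxt', phase='test')  # parses the prototxt
graph = builder.build()                                  # nodes connected, shapes computed
print(graph)                                             # Type/Name/Param/Output table via Graph.__str__
for node in graph.topologically_sorted():
    print(node.kind, node.name, node.output_shape)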
###########################################################################
#
# This program is part of Zenoss Core, an open source monitoring platform.
# Copyright (C) 2007, Zenoss Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 as published by
# the Free Software Foundation.
#
# For complete information please visit: http://www.zenoss.com/oss/
#
###########################################################################

from Products.DataCollector.plugins.CollectorPlugin import SnmpPlugin, GetMap


class MrvL3DeviceMap(SnmpPlugin):
    """Map mib elements from Mrv switch mib to get hw and os products."""

    maptype = "MrvL3DeviceMap"

    snmpGetMap = GetMap({
        # '.1.2.840.10036.3.1.2.1.2.2' : 'manufacturer',
        '.1.3.6.1.4.1.629.6.10.75.1.1.5.1.0': 'setHWProductKey',
        '.1.3.6.1.4.1.629.6.10.75.1.1.3.1.10.1': 'setHWSerialNumber',
        '.1.3.6.1.4.1.629.6.10.75.1.1.3.1.6.1': 'setOSProductKey',
    })

    def process(self, device, results, log):
        """collect snmp information from this device"""
        log.info('processing %s for device %s', self.name(), device.id)
        getdata, tabledata = results
        if getdata['setHWProductKey'] is None:
            return None
        om = self.objectMap(getdata)
        return om
anksp21/Community-Zenpacks
ZenPacks.AndreaConsadori.MRV/ZenPacks/AndreaConsadori/MRV/modeler/plugins/MrvL3DeviceMap.py
Python
gpl-2.0
1,374
[ "VisIt" ]
fdcce548f7812f70de9ae1c53f9d9657752477c9a10bb9452e4c245cc898ecf3
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk


class vtkMultiGroupDataGroupFilter(SimpleVTKClassModuleBase):
    def __init__(self, module_manager):
        SimpleVTKClassModuleBase.__init__(
            self, module_manager,
            vtk.vtkMultiGroupDataGroupFilter(), 'Processing.',
            ('vtkDataObject',), ('vtkMultiGroupDataSet',),
            replaceDoc=True,
            inputFunctions=None, outputFunctions=None)
chrisidefix/devide
modules/vtk_basic/vtkMultiGroupDataGroupFilter.py
Python
bsd-3-clause
522
[ "VTK" ]
49559128602a7a70392920fb2f5b692ac8a6423524361a30a8ff8aa762b70164
""" @name: Modules/House/Entertainment/_test/test_entertainment_utility.py @author: D. Brian Kimmel @contact: D.BrianKimmel@gmail.com @copyright: (c) 2019-2019 by D. Brian Kimmel @license: MIT License @note: Created on Dec 25, 2019 @summary: Test Passed all 13 tests - DBK - 2019-12-25 """ __updated__ = '2019-12-25' # Import system type stuff from twisted.trial import unittest from ruamel.yaml import YAML import json # Import PyMh files from _test.testing_mixin import SetupPyHouseObj from Modules.House.Entertainment import entertainment_utility as E_U from Modules.Core.Utilities import json_tools from Modules.Core.Utilities.debug_tools import PrettyFormatAny TEST_YAML = """\ UnitType: 1 ControlCommands: Power: - PWR - PWZ Volume: - MVL - ZVL Mute: - AMT - ZMT InputSelection: - SLI - SLZ Arguments: Power: 'Off': '00' 'On': '01' '?': 'QSTN' Volume: 'Up': 'UP' 'Down': 'DOWN' '?': 'QSTN' InputSelection: 'Video1': '00' 'Cbl/Sat': '01' # 'VIDEO2', 'CBL/SAT' 'Game': '02' # 'VIDEO3', 'GAME/TV', 'GAME', 'GAME1' 'Aux': '03' # 'VIDEO4', 'AUX1(AUX)' 'Pc': '05' # 'VIDEO6', 'PC' 'Bd/Dvd': '10' # 'DVD', 'BD/DVD' 'Strmbox': '11' # 'STRM BOX' 'TV': '12' # 'TV' 'Phono': '22' # 'PHONO' 'Cd': '23' # 'CD', 'TV/CD' 'Fm': '24' # FM + PRS 00 + TUN 10330 + PR3 00 + TU3 10330 'Am': '25' # AM + PRS 00 + TUN 00830 + PR3 00 + TU3 00830 'BlueTooth': '2E' 'Network': '2B' Zones: 0: Main 1: Lanai """ class SetupMixin(object): def setUp(self): self.m_pyhouse_obj = SetupPyHouseObj().BuildPyHouseObj() l_yaml = YAML() self.m_yaml = l_yaml.load(TEST_YAML) class A0(unittest.TestCase): def test_00_Print(self): _x = PrettyFormatAny.form('_test', 'so PrettyFormatAny is defined') print('Id: test_entertainment_utility') class C1_Setup(SetupMixin, unittest.TestCase): """Test that we have set up properly for the rest of the testing classes. """ def setUp(self): SetupMixin.setUp(self) def test_1_BuildObjects(self): """ """ # print('C1-01-A - Unit type: {}'.format(self.m_yaml)) l_dict = E_U.extract_device_config_file(self.m_yaml) # print(PrettyFormatAny.form(l_dict, 'C1-01-B - Dict')) l_json = json.dumps(l_dict, indent=4) print('C1-01-C - JSON: {}'.format(l_json)) # ## END DBK
DBrianKimmel/PyHouse
Project/src/Modules/House/Entertainment/_test/test_entertainment_utility.py
Python
mit
2,632
[ "Brian" ]
8c60b61d9dfe9a7617879f5c893cb8a7cc4e0d70e88333fb42d430e93dc27022
#!/usr/bin/env python """Test plotting of various GoSubDag options.""" __copyright__ = "Copyright (C) 2016-2017, DV Klopfenstein, H Tang. All rights reserved." __author__ = "DV Klopfenstein" import os import sys from goatools.base import get_godag from goatools.base import dnld_gaf from goatools.associations import read_gaf from goatools.semantic import TermCounts from goatools.gosubdag.gosubdag import GoSubDag from goatools.gosubdag.plot.gosubdag_plot import GoSubDagPlot REPO = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../") # TBD MOVE TO GOATOOLS TEST PKG class Run(object): """Objects for running plotting test.""" def __init__(self, obo, gaf, prt): self.prt = prt self.cwd = os.getcwd() # Gene Ontologies self.go2obj_all = get_godag(os.path.join(REPO, "../goatools/", obo)) # Annotations #_file_gaf = dnld_gaf(os.path.join(REPO, gaf)) _file_gaf = dnld_gaf(gaf) print("GAF: {GAF}\n".format(GAF=_file_gaf)) self.gene2gos = read_gaf(_file_gaf) self.tcntobj = TermCounts(self.go2obj_all, self.gene2gos) # GoSubDag self.gosubdag_all = GoSubDag(None, self.go2obj_all, tcntobj=self.tcntobj, prt=prt) self.prtfmt = self.gosubdag_all.prt_attr['fmta'] def prt_goids_all(self, prt): """Print all GO IDs, including alternate GO IDs, in GODag.""" self.gosubdag_all.prt_goids(prtfmt=self.prtfmt, prt=prt) def plt_goids(self, fout_img, go_sources): """Plot GO IDs.""" # % src/bin/go_plot.py GOs --obo=../goatools/data/i86.obo --outfile=t00.jpg --mark_alt_id gosubdag = GoSubDag(go_sources, self.gosubdag_all.go2obj, prt=self.prt, # rcntobj=False, rcntobj=self.gosubdag_all.rcntobj, go2nt=self.gosubdag_all.go2nt) prtfmt = gosubdag.prt_attr['fmta'] goids_plt = GoSubDagPlot(gosubdag).get_goids_plt() self.prt.write("\n{N} GO IDs\n".format(N=len(goids_plt))) gosubdag.prt_goids(goids_plt, prtfmt=prtfmt, prt=self.prt) objplt = GoSubDagPlot(gosubdag, mark_alt_id=True) objplt.plt_dag(os.path.join(self.cwd, fout_img)) def test_plotgosubdag(prt=sys.stdout): """Test plotting of various GoSubDag options.""" objrun = Run("data/i86.obo", "goa_human", prt) # objrun.prt_goids_all(prt) go_sources = set([ 'GO:0000004', # a BP 15 L00 D00 biological_process 'GO:0008151', # a BP 10 L01 D01 B cellular process 'GO:0007516', # BP 0 L04 D05 ABC hemocyte development 'GO:0036476']) # BP 0 L06 D06 AB neuron death in response to hydrogen peroxide objrun.plt_goids("test_gosubdag_tcntobj.png", go_sources) # kws_exp = [ # ({}, {'rcntobj':rcntobj}), # ({'rcntobj':None}, {'rcntobj':None}), # ({'rcntobj':False}, {'rcntobj':None}), # ({'rcntobj':True}, {'rcntobj':rcntobj}), # ({'rcntobj':rcntobj}, {'rcntobj':rcntobj}), # # #({}, {'tcntobj':tcntobj}), # #({'tcntobj':None}, {'tcntobj':None}), # #({'tcntobj':False}, {'tcntobj':None}), # #({'tcntobj':True}, {'tcntobj':tcntobj}), # #({'tcntobj':tcntobj}, {'tcntobj':tcntobj}), # ] # for idx, (kws, expected_fields) in enumerate(kws_exp): # gosubdag = GoSubDag(go_sources, objrun.go2obj_all, prt=prt, **kws) # _chk_obj(getattr(gosubdag, 'rcntobj'), expected_fields['rcntobj'], CountRelatives) # # def _chk_obj(act_obj, exp_obj, cls): # """Check that object creation agrees with expected results.""" # if exp_obj is None: # assert act_obj is None # else: # assert isinstance(act_obj, cls) # # # def _chk_rcntobj(idx, kws, gosubdag, expected_fields): # # """Check that an rcntobj was created or not created.""" # # print idx, kws, expected_fields, gosubdag.rcntobj # # # # goids = kws['GO'] if 'GO' in kws else set(get_go2obj_unique(go2obj)) # # # print('CLI: FAST GoSubDag 0 -------------------') # # # gosubdag = 
GoSubDag(goids, go2obj, rcntobj=False) # # # print('CLI: RCNTOBJ({})'.format(gosubdag.rcntobj)) # # # gosubdag.prt_goids() # # # print('CLI: FAST GoSubDag 1 -------------------') # # # tcntobj = self._get_tcntobj(kws, gosubdag.go2obj) # # # print('CLI: TermCounts INITed -------------------') # # # self.gosubdag = GoSubDag(goids, go2obj, tcntobj=tcntobj) # # # # self.gosubdag.prt_goids() # # # print('CLI: FAST GoSubDag 2 -------------------') # # # objcolor = Go2Color(self.gosubdag, None) # # # objplt = GoSubDagPlot(self.gosubdag, Go2Color=objcolor, **kws) # # # fout_img = self.get_outfile(goids, **kws) # # # objplt.plt_dag(fout_img) if __name__ == '__main__': test_plotgosubdag() # Copyright (C) 2016-2017, DV Klopfenstein, H Tang. All rights reserved.
tanghaibao/goatools
tests/test_gosubdag_tcntobj.py
Python
bsd-2-clause
5,009
[ "NEURON" ]
7b1aa0e8f54374496d15fb6f8ed186c7aa764be11fea404e3b660e5e6528c4aa
''' Created on Feb 19, 2013 List of parsers. Each one is specific to the version of log. @author: tcpan ''' import re, pprint class LogParserBase(): ''' this is the base class. all other parsers inherit from this supports streaming so only parse one line at a time. ''' pp = pprint.PrettyPrinter(indent=4) experiment = {} def __init__(self,filename): ''' Constructor ''' self.parseFilename(filename) def parseFilename(self, filename): ''' parsing filename ''' # initialize self.experiment['filename'] = filename self.experiment['type'] = "synth" if "syntest" in filename else "TCGA GBM" self.experiment['layout'] = "separate" self.experiment['nMaster'] = 0 if "push" in filename else 1 self.experiment['nCompute'] = -1 self.experiment['nRead'] = -1 self.experiment['nWrite'] = -1 self.experiment['transport'] = "opencv" self.experiment['bufferSize'] = 1 self.experiment['ioGroupSize'] = -1 self.experiment['dataSize'] = 4096 self.experiment['blocking'] = True self.experiment['compression'] = False self.experiment['ost'] = True if ".ost." in filename else False # parse the filename if "kfs" in filename: self.experiment['sys'] = "kfs" elif "kids" in filename: self.experiment['sys'] = "kids" elif "jaguar" in filename: self.experiment['sys'] = "jaguar" elif "titan" in filename: self.experiment['sys'] = "titan" else: self.experiment['sys'] = "keeneland" if .+\/synthetic\.datasizes\.p([0-9]+)\.push\.([^\.]+)\.([0-9]+)$ # submit the parameter to the database, get back the unique experiment id. def parseLine(self, line, handler): ''' parses one line of the log file. takes a handler to upload data to database ''' print "base parser" def submitToDB(self, data, handler): ''' submit to database ''' self.pp.pprint(data); class LogParserV2_1(LogParserBase): ''' this is log parser for v 2.1. supports streaming so only parse one line at a time. ''' def parseLine(self, line, handler): ''' parses one line of the log file. takes a handler to upload data to database ''' # check for empty line l = line.strip(); if len(l) == 0: return # get the process id event = {} p=re.compile('pid,(\d+),hostName,([^,]+),group,(\d+),sessionName,([^,]+),') m=p.match(l) if m: event['pid'] = m.group(1) event['hostname'] = m.group(2) event['group'] = m.group(3) event['sessionname'] = m.group(4) else: print "no match: {0}".format(l) return # now extract the events p = re.compile('([^,]+),(\d+),(\d+),(\d+),(\d*),') it = p.finditer(l) for m in it: event['name'] = m.group(1) event['type'] = m.group(2) event['start'] = m.group(3) event['stop'] = m.group(4) event['attr'] = m.group(5) self.submitToDB(event, handler) class LogParserV2(LogParserBase): ''' this is log parser for v 2.1. supports streaming so only parse one line at a time. ''' def parseLine(self, line, handler): ''' parses one line of the log file. takes a handler to upload data to database ''' # check for empty line l = line.strip(); if len(l) == 0: return event = {} # get the process id p=re.compile('pid,(\d+),hostName,([^,]+),sessionName,([^,]+),') m=p.match(l) if m: event['pid'] = m.group(1) event['hostname'] = m.group(2) event['sessionname'] = m.group(3) else: print "no match: {0}".format(l) return # now extract the events p = re.compile('([^,]+),(\d+),(\d+),(\d+),(\d*),') it = p.finditer(l) for m in it: event['name'] = m.group(1) event['type'] = m.group(2) event['start'] = m.group(3) event['stop'] = m.group(4) event['attr'] = m.group(5) self.submitToDB(event, handler) class LogParserV1(LogParserBase): ''' this is log parser for v 2.1. 
supports streaming so only parse one line at a time. ''' def parseLine(self, line, handler): ''' parses one line of the log file. takes a handler to upload data to database ''' # check for empty line l = line.strip(); if len(l) == 0: return event = {} # get the process id p=re.compile('pid,(\d+),hostName,([^,]+),sessionName,([^,]+),') m=p.match(l) if m: event['pid'] = m.group(1) event['hostname'] = m.group(2) event['sessionname'] = m.group(3) else: print "no match: {0}".format(l) return # now extract the events p = re.compile('([^,]+),(\d+),(\d+),(\d+),') it = p.finditer(l) for m in it: event['name'] = m.group(1) event['type'] = m.group(2) event['start'] = m.group(3) event['stop'] = m.group(4) self.submitToDB(event, handler)
EmoryUniversity/PIAT
src/common/log-analysis/python-discard/LogParser/LogParsers.py
Python
lgpl-3.0
5,971
[ "Jaguar" ]
2434853c1a1ba57961b7012ad36acfd3ad1e1fd78d1fbc89e9afeb3b6fb2b1c3
# -*- coding: utf-8 -*- # vi:si:et:sw=4:sts=4:ts=4 ## ## Copyright (C) 2005-2013 Async Open Source <http://www.async.com.br> ## All rights reserved ## ## This program is free software; you can redistribute it and/or modify ## it under the terms of the GNU Lesser General Public License as published by ## the Free Software Foundation; either version 2 of the License, or ## (at your option) any later version. ## ## This program is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU Lesser General Public License for more details. ## ## You should have received a copy of the GNU Lesser General Public License ## along with this program; if not, write to the Free Software ## Foundation, Inc., or visit: http://www.gnu.org/. ## ## Author(s): Stoq Team <stoq-devel@async.com.br> ## """A list of all tables in database and a way to get them. Add new tables here: ('domain.modulo') : ['classA', 'classB', ...], module is the domain module which lives the classes in the list (classA, classB, ...). """ import collections import logging import os from kiwi.python import namedAny from stoqlib.lib.pluginmanager import get_plugin_manager from stoqlib.lib.translation import stoqlib_gettext _ = stoqlib_gettext log = logging.getLogger(__name__) _tables = [ ('system', ["SystemTable", "TransactionEntry"]), ('parameter', ["ParameterData"]), ('account', ['Account', 'AccountTransaction', 'BankAccount', 'BillOption']), ('profile', ["UserProfile", "ProfileSettings"]), ('person', ["Person"]), ('address', ["CityLocation", "Address"]), ('person', ["EmployeeRole", "WorkPermitData", "MilitaryData", "VoterData", "ContactInfo", "LoginUser", "Calls", "Individual", "Company", "Client", "Supplier", "Employee", "Branch", "SalesPerson", "Transporter", "EmployeeRoleHistory", "ClientCategory", "ClientSalaryHistory", "CreditCheckHistory", "UserBranchAccess"]), ('synchronization', ["BranchSynchronization"]), ('station', ["BranchStation"]), ('till', ["Till", "TillEntry"]), ('payment.card', ["CreditProvider", "CreditCardData", 'CardPaymentDevice', 'CardOperationCost']), ('payment.category', ["PaymentCategory"]), ('payment.comment', ["PaymentComment"]), ('payment.group', ["PaymentGroup"]), ('payment.method', ["PaymentMethod", "CheckData"]), ('payment.payment', ["Payment", "PaymentChangeHistory"]), ('payment.renegotiation', ["PaymentRenegotiation"]), ('fiscal', ["CfopData", "FiscalBookEntry", "Invoice"]), ('sale', ["SaleItem", "Delivery", "Sale", 'SaleComment', 'SaleToken']), ('returnedsale', ["ReturnedSale", "ReturnedSaleItem"]), ('sellable', ["SellableUnit", "SellableTaxConstant", "SellableCategory", 'ClientCategoryPrice', "Sellable"]), ('service', ["Service"]), ('product', ["Product", "ProductComponent", "ProductHistory", 'ProductManufacturer', 'ProductQualityTest', "ProductSupplierInfo", 'StockTransactionHistory', "ProductStockItem", "GridGroup", "GridAttribute", "GridOption", "ProductAttribute", "ProductOptionMap", "Storable", 'StorableBatch']), ('purchase', ["PurchaseOrder", "Quotation", "PurchaseItem", "QuoteGroup"]), ('receiving', ["ReceivingOrder", "ReceivingOrderItem", 'PurchaseReceivingMap']), ('devices', ["DeviceSettings", "FiscalDayHistory", "FiscalDayTax"]), ('commission', ["CommissionSource", "Commission"]), ('transfer', ["TransferOrder", "TransferOrderItem"]), ('inventory', ["Inventory", "InventoryItem"]), ('image', ["Image"]), ('attachment', ["Attachment"]), ('stockdecrease', ["StockDecrease", 
"StockDecreaseItem"]), ('production', ["ProductionOrder", "ProductionItem", "ProductionItemQualityResult", "ProductionMaterial", "ProductionService", "ProductionProducedItem"]), ('loan', ['Loan', 'LoanItem']), ('invoice', ['InvoiceField', 'InvoiceLayout', 'InvoicePrinter']), ('taxes', ['ProductIcmsTemplate', 'ProductIpiTemplate', 'ProductPisTemplate', 'ProductCofinsTemplate', 'ProductTaxTemplate', 'InvoiceItemIcms', 'InvoiceItemIpi']), ('uiform', ['UIForm', 'UIField']), ('plugin', ['InstalledPlugin', 'PluginEgg']), ('costcenter', ['CostCenter', 'CostCenterEntry']), ('stockdecrease', ['StockDecrease', 'StockDecreaseItem']), ('workorder', ['WorkOrder', 'WorkOrderItem', 'WorkOrderCategory', 'WorkOrderPackage', 'WorkOrderPackageItem', 'WorkOrderHistory']), ('event', ['Event']), ] # table name (e.g. "Person") -> class _tables_cache = collections.OrderedDict() def _get_tables_cache(): if _tables_cache: return _tables_cache for path, table_names in _tables: for table_name in table_names: klass = namedAny('stoqlib.domain.%s.%s' % (path, table_name)) _tables_cache[table_name] = klass p_manager = get_plugin_manager() for p_name in p_manager.installed_plugins_names: plugin = p_manager.get_plugin(p_name) for path, table_names in plugin.get_tables(): for table_name in table_names: desc = p_manager.get_description_by_name(p_name) basepath = os.path.basename(desc.dirname) klass = namedAny('.'.join([basepath, path, table_name])) _tables_cache[table_name] = klass return _tables_cache def get_table_type_by_name(table_name): """Gets a table by name. :param table_name: name of the table """ return _get_tables_cache()[table_name] def get_table_types(): return _get_tables_cache().values()
andrebellafronte/stoq
stoqlib/database/tables.py
Python
gpl-2.0
6,846
[ "VisIt" ]
34d6fd58e2931e32a3014dc62cfb6b4bcd86537571c0d70a0866114f31a534f4
# coding: utf-8 # Copyright (c) Pymatgen Development Team. # Distributed under the terms of the MIT License. """ Module containing class to create an ion """ import re from copy import deepcopy import numpy as np from monty.json import MSONable from pymatgen.core.composition import Composition from pymatgen.util.string import formula_double_format, Stringify class Ion(Composition, MSONable, Stringify): """ Basic ion object. It is just a Composition object with an additional variable to store charge. The net charge can either be represented as Mn++, or Mn+2, or Mn[2+]. Note the order of the sign and magnitude in each representation. """ def __init__(self, composition, charge=0.0, properties=None): """ Flexible Ion construction, similar to Composition. For more information, please see pymatgen.core.Composition """ super().__init__(composition) self._charge = charge @classmethod def from_formula(cls, formula: str) -> "Ion": """ Creates Ion from formula. :param formula: :return: Ion """ charge = 0.0 f = formula m = re.search(r"\[([^\[\]]+)\]", f) if m: m_chg = re.search(r"([\.\d]*)([+-])", m.group(1)) if m_chg: if m_chg.group(1) != "": charge += float(m_chg.group(1)) * (float(m_chg.group(2) + "1")) else: charge += float(m_chg.group(2) + "1") f = f.replace(m.group(), "", 1) m = re.search(r"\(aq\)", f) if m: f = f.replace(m.group(), "", 1) for m_chg in re.finditer(r"([+-])([\.\d]*)", f): sign = m_chg.group(1) sgn = float(str(sign + "1")) if m_chg.group(2).strip() != "": charge += float(m_chg.group(2)) * sgn else: charge += sgn f = f.replace(m_chg.group(), "", 1) composition = Composition(f) return cls(composition, charge) @property def formula(self): """ Returns a formula string, with elements sorted by electronegativity, e.g., Li4 Fe4 P4 O16. """ formula = super().formula chg_str = "" if self.charge > 0: chg_str = " +" + formula_double_format(self.charge, False) elif self._charge < 0: chg_str = " " + formula_double_format(self.charge, False) return formula + chg_str @property def anonymized_formula(self): """ An anonymized formula. Appends charge to the end of anonymized composition """ anon_formula = super().anonymized_formula chg = self._charge chg_str = "" if chg > 0: chg_str += "{}{}".format("+", str(int(chg))) elif chg < 0: chg_str += "{}{}".format("-", str(int(np.abs(chg)))) return anon_formula + chg_str @property def reduced_formula(self): """ Returns a reduced formula string with appended charge. """ reduced_formula = super().reduced_formula charge = self._charge / self.get_reduced_composition_and_factor()[1] if charge > 0: if abs(charge) == 1: chg_str = "[+]" else: chg_str = "[" + formula_double_format(charge, False) + "+]" elif charge < 0: if abs(charge) == 1: chg_str = "[-]" else: chg_str = "[{}-]".format(formula_double_format(abs(charge), False)) else: chg_str = "(aq)" return reduced_formula + chg_str @property def alphabetical_formula(self): """ Returns a reduced formula string with appended charge """ alph_formula = super().alphabetical_formula chg_str = "" if self.charge > 0: chg_str = " +" + formula_double_format(self.charge, False) elif self.charge < 0: chg_str = " " + formula_double_format(self.charge, False) return alph_formula + chg_str @property def charge(self): """ Charge of the ion """ return self._charge def as_dict(self): """ Returns: dict with composition, as well as charge """ d = super().as_dict() d["charge"] = self.charge return d @classmethod def from_dict(cls, d): """ Generates an ion object from a dict created by as_dict(). Args: d: {symbol: amount} dict. 
""" input = deepcopy(d) charge = input.pop("charge") composition = Composition(input) return Ion(composition, charge) @property def to_reduced_dict(self): """ Returns: dict with element symbol and reduced amount e.g., {"Fe": 2.0, "O":3.0}. """ d = self.composition.to_reduced_dict d["charge"] = self.charge return d @property def composition(self): """Composition of ion.""" return Composition(self._data) def __eq__(self, other): if self.composition != other.composition: return False if self.charge != other.charge: return False return True def __add__(self, other): """ Addition of two ions. """ new_composition = self.composition + other.composition new_charge = self.charge + other.charge return Ion(new_composition, new_charge) def __sub__(self, other): """ Subtraction of two ions """ new_composition = self.composition - other.composition new_charge = self.charge - other.charge return Ion(new_composition, new_charge) def __mul__(self, other): """ Multiplication of an Ion with a factor """ new_composition = self.composition * other new_charge = self.charge * other return Ion(new_composition, new_charge) def __hash__(self): return hash((self.composition, self.charge)) def __str__(self): return self.formula def __repr__(self): return "Ion: " + self.formula def to_pretty_string(self) -> str: """ :return: Pretty string with proper superscripts. """ str_ = super().formula if self.charge > 0: str_ += "^+" + formula_double_format(self.charge, False) elif self._charge < 0: str_ += "^" + formula_double_format(self.charge, False) return str_
gmatteo/pymatgen
pymatgen/core/ion.py
Python
mit
6,688
[ "pymatgen" ]
bd367eec457237b8d166ddddda83732a0a6c8f7e3cf78443c1f055f12fe8ada4
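A short, hedged example of the Ion API defined in the pymatgen record above (from_formula parsing, charge arithmetic, and reduced_formula with an appended charge). The values in the comments are what the class's own logic implies, not verified output.

from pymatgen.core.ion import Ion

# The bracketed charge form "Mn[2+]" is one of the formats the from_formula
# docstring says it accepts.
mn = Ion.from_formula("Mn[2+]")
print(mn.charge)           # expected: 2.0
print(mn.reduced_formula)  # expected: something like 'Mn[2+]'

# Addition combines both composition and net charge.
oh = Ion.from_formula("OH[-]")
combined = mn + oh
print(combined.charge)     # expected: 1.0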
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.

from pymatgen.core.libxcfunc import LibxcFunc
from pymatgen.util.testing import PymatgenTest


class LibxcFuncTest(PymatgenTest):
    def test_libxcfunc_api(self):
        """Testing libxcfunc_api."""

        # LDA correlation: Hedin & Lundqvist
        xc = LibxcFunc.LDA_C_HL
        print(xc)
        assert not xc.is_x_kind and xc.is_c_kind and not xc.is_xc_kind
        assert xc.is_lda_family and not xc.is_gga_family
        print(xc.info_dict)

        assert xc.family in LibxcFunc.all_families()
        assert xc.kind in LibxcFunc.all_kinds()

        # Test if object can be serialized with Pickle.
        self.serialize_with_pickle(xc, test_eq=True)

        # Test if object supports MSONable
        self.assertMSONable(xc, test_if_subclass=False)
gmatteo/pymatgen
pymatgen/core/tests/test_libxcfunc.py
Python
mit
875
[ "pymatgen" ]
ba920ce6cb8eedc2b8e4ce70b79d66e17fd22c3cf66d5a5bbab4c5097b8c944d
"""Jupyter Integration.""" import ast import socket from os.path import join as pjoin from urllib.parse import urljoin import json import re import sys import types import time from multiprocessing import Process from IPython import get_ipython from IPython.display import display, HTML, clear_output from IPython.core.error import UsageError from IPython.core.interactiveshell import InteractiveShell from IPython.core.magic import Magics, magics_class, line_magic from nbformat import read from notebook.notebookapp import list_running_servers import ipykernel import requests from bowtie._app import App def get_notebook_name() -> str: """Return the full path of the jupyter notebook. References ---------- https://github.com/jupyter/notebook/issues/1000#issuecomment-359875246 """ kernel_id = re.search( # type: ignore 'kernel-(.*).json', ipykernel.connect.get_connection_file() ).group(1) servers = list_running_servers() for server in servers: response = requests.get(urljoin(server['url'], 'api/sessions'), params={'token': server.get('token', '')}) for session in json.loads(response.text): if session['kernel']['id'] == kernel_id: relative_path = session['notebook']['path'] return pjoin(server['notebook_dir'], relative_path) raise Exception('Noteboook not found.') def load_notebook(fullname: str): """Import a notebook as a module.""" shell = InteractiveShell.instance() path = fullname # load the notebook object with open(path, 'r', encoding='utf-8') as f: notebook = read(f, 4) # create the module and add it to sys.modules mod = types.ModuleType(fullname) mod.__file__ = path # mod.__loader__ = self mod.__dict__['get_ipython'] = get_ipython sys.modules[fullname] = mod # extra work to ensure that magics that would affect the user_ns # actually affect the notebook module's ns save_user_ns = shell.user_ns shell.user_ns = mod.__dict__ try: for cell in notebook.cells: if cell.cell_type == 'code': try: # only run valid python code ast.parse(cell.source) except SyntaxError: continue try: # pylint: disable=exec-used exec(cell.source, mod.__dict__) except NameError: print(cell.source) raise finally: shell.user_ns = save_user_ns return mod @magics_class class BowtieMagic(Magics): """Bowtie magic commands.""" process = None @line_magic def bowtie_stop(self, line=''): # pylint: disable=unused-argument """Terminate Bowtie app.""" if self.process is None: print('No app has been run.') else: print('Terminating Bowtie app.') self.process.terminate() if self.process.is_alive(): time.sleep(1) self.process.kill() time.sleep(1) if self.process.is_alive(): print('Failed to stop Bowtie app.', file=sys.stderr) return print('Successfully stopped Bowtie app.') self.process.close() self.process = None @line_magic def bowtie(self, line=''): """Build and serve a Bowtie app.""" opts, appvar = self.parse_options(line, 'w:h:b:p:') width = opts.get('w', 1500) height = opts.get('h', 1000) border = opts.get('b', 2) port = opts.get('p', 9991) host = '0.0.0.0' sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) result = sock.connect_ex((host, port)) if result == 0: raise Exception(f'Port {port} is unavailable on host {host}, aborting.') global_ns = self.shell.user_global_ns local_ns = self.shell.user_ns try: # pylint: disable=eval-used app = eval(appvar, global_ns, local_ns) except NameError: raise UsageError(f'Could not find App {appvar}') if not isinstance(app, App): raise UsageError(f'App is of type {type(app)} needs to be type <bowtie.App>') app._build(notebook=get_notebook_name()) # pylint: disable=protected-access 
self.process = Process(target=app._serve) # pylint: disable=protected-access self.process.start() time.sleep(5) clear_output() display(HTML( f'<iframe src=http://localhost:9991 width={width} height={height} ' f'frameBorder={border}></iframe>' ))
jwkvam/conex
bowtie/_magic.py
Python
mit
4,773
[ "Bowtie" ]
f5041255897a3e8fb5c446e4c001a601c1320c9e9037cfe1d05ba6b1f0ce5884
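A hedged sketch of how the %bowtie line magics defined in the record above might be used from a notebook. The App construction is a placeholder, the option flags (-w/-h/-b/-p) follow the parse_options call in the record, and the extension-registration step is an assumption not shown in the file.

# Notebook usage sketch (assumes BowtieMagic has been registered with IPython):
#
# In [1]: from bowtie import App
#         app = App()
#         # ... lay out components on `app` ...
#
# In [2]: %bowtie -w 1200 -h 800 -p 9991 app   # builds and serves, then displays an <iframe>
#
# In [3]: %bowtie_stop                          # terminates the background serving process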
# The MIT License (MIT) # # Original work Copyright (c) 2016 Taehoon Kim # Modified work Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import tensorflow as tf from model import Tower from utils import model_property image_summary = tf.summary.image scalar_summary = tf.summary.scalar histogram_summary = tf.summary.histogram merge_summary = tf.summary.merge SummaryWriter = tf.summary.FileWriter class batch_norm(object): """ This class creates an op that composes the specified tensor with a batch normalization layer. """ def __init__(self, epsilon=1e-5, momentum=0.9, name="batch_norm"): """Instance initialization""" with tf.variable_scope(name): self.epsilon = epsilon self.momentum = momentum self.name = name def __call__(self, x, train=True): """ Functional interface Args: x: tensor to compose train: set to True during training and False otherwise """ return tf.contrib.layers.batch_norm(x, decay=self.momentum, updates_collections=None, epsilon=self.epsilon, scale=True, is_training=train, scope=self.name) def conv2d(input_, output_dim, k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02, name="conv2d"): """ Compose specified symbol with 2D convolution layer Args: input_: tensor to compose. Shape: [N, H, W, C] output_dim: number of output features maps k_h: kernel height k_w: kernel width d_h: horizontal stride d_w: vertical stride stddev: standard deviation of gaussian distribution to use for random weight initialization name: name scope Returns: Composed tensor. """ with tf.variable_scope(name): w = tf.get_variable('w', [k_h, k_w, input_.get_shape()[-1], output_dim], initializer=tf.truncated_normal_initializer(stddev=stddev)) conv = tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding='SAME') biases = tf.get_variable('biases', [output_dim], initializer=tf.constant_initializer(0.0)) conv = tf.nn.bias_add(conv, biases) return conv def deconv2d(input_, output_shape, k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02, name="deconv2d", with_w=False): """ Compose specified symbol with 2D *transpose* convolution layer Args: input_: tensor to compose. Shape: [N, H, W, C] output_shape: output shape k_h: kernel height k_w: kernel width d_h: horizontal stride d_w: vertical stride stddev: standard deviation of gaussian distribution to use for random weight initialization name: name scope Returns: Composed tensor. 
""" with tf.variable_scope(name): # filter : [height, width, output_channels, in_channels] w = tf.get_variable('w', [k_h, k_w, output_shape[-1], input_.get_shape()[-1]], initializer=tf.random_normal_initializer(stddev=stddev)) deconv = tf.nn.conv2d_transpose(input_, w, output_shape=output_shape, strides=[1, d_h, d_w, 1]) biases = tf.get_variable('biases', [output_shape[-1]], initializer=tf.constant_initializer(0.0)) deconv = tf.reshape(tf.nn.bias_add(deconv, biases), output_shape) if with_w: return deconv, w, biases else: return deconv def lrelu(x, leak=0.2, name="lrelu"): """Compose specified tensor with leaky Rectifier Linear Unit""" return tf.maximum(x, leak*x) def linear(input_, output_size, scope=None, stddev=0.02, bias_start=0.0, with_w=False): """ Compose specified tensor with linear (fully-connected) layer Args: input_: tensor to compose. Shape: [N, M] output_size: number of output neurons scope: name scope stddev: standard deviation of gaussian distribution to use for random weight initialization name: name scope with_w: whether to also return parameter variables Returns: Composed tensor. Shape: [N, output_size] """ shape = input_.get_shape().as_list() with tf.variable_scope(scope or "Linear"): matrix = tf.get_variable("Matrix", [shape[1], output_size], tf.float32, tf.random_normal_initializer(stddev=stddev)) bias = tf.get_variable("bias", [output_size], initializer=tf.constant_initializer(bias_start)) if with_w: return tf.matmul(input_, matrix) + bias, matrix, bias else: return tf.matmul(input_, matrix) + bias class UserModel(Tower): """ User Model definition DIGITS creates an instance of this class for every tower it needs to create. This includes: - one for training, - one for validation, - one for testing. In the case of multi-GPU training, one training instance is created for every GPU. DIGITS takes care of doing the gradient averaging across GPUs so this class only needs to define the inference op and desired loss/cost function. """ def __init__(self, *args, **kwargs): """ Identify the correct input nodes. In the parent class, DIGITS conveniently sets the following fields: - self.is_training: whether this is a training graph - self.is_inference: whether this graph is created for inference/testing - self.x: input node. Shape: [N, H, W, C] - self.y: label. Shape: [N] for scalar labels, [N, H, W, C] otherwise. Only defined if self._is_training is True """ super(UserModel, self).__init__(*args, **kwargs) image_size = 64 output_size = 64 c_dim = 3 z_dim = 100 self.dcgan_init(image_size=image_size, output_size=output_size, c_dim=c_dim, z_dim=z_dim) @model_property def inference(self): """ op to use for inference """ # scale back to [0, 255] range images = (self.G * 127) + 128 images_flat = tf.reshape(images, [self.batch_size, self.image_size * self.image_size * self.c_dim]) # concatenate encoded z and generated image into a single flat structure zgen_flat = tf.reshape(self.DzGEN, [self.batch_size, self.z_dim]) return tf.concat([zgen_flat, images_flat], 1) @model_property def loss(self): """ Loss function Returns either an op or a list of dicts. If the returned value is an op then DIGITS will optimize against this op with respect to all trainable variables. If the returned value is a list then DIGITS will optimize against each loss in the list with respect to the specified variables. """ # here we are returning a list because we want to alternately optimize the # discriminator and the generator. 
losses = [ {'loss': self.dzgen_loss, 'vars': self.d_vars}, ] return losses def dcgan_init(self, image_size, output_size, z_dim, c_dim, gf_dim=64, df_dim=64, gfc_dim=1024, dfc_dim=1024): """ Args: output_size: (optional) The resolution in pixels of the images. [64] z_dim: (optional) Dimension of dim for Z. [100] gf_dim: (optional) Dimension of gen filters in first conv layer. [64] df_dim: (optional) Dimension of discrim filters in first conv layer. [64] gfc_dim: (optional) Dimension of gen units for for fully connected layer. [1024] dfc_dim: (optional) Dimension of discrim units for fully connected layer. [1024] c_dim: (optional) Dimension of image color. For grayscale input, set to 1. [3] """ self.image_size = image_size self.output_size = output_size self.z_dim = z_dim self.gf_dim = gf_dim self.df_dim = df_dim self.gfc_dim = gfc_dim self.dfc_dim = dfc_dim self.c_dim = c_dim self.batch_size = tf.shape(self.x)[0] self.soft_label_margin = 0.1 # batch normalization : deals with poor initialization helps gradient flow self.d_bn1 = batch_norm(name='d_bn1') self.d_bn2 = batch_norm(name='d_bn2') self.d_bn3 = batch_norm(name='d_bn3') self.g_bn0 = batch_norm(name='g_bn0') self.g_bn1 = batch_norm(name='g_bn1') self.g_bn2 = batch_norm(name='g_bn2') self.g_bn3 = batch_norm(name='g_bn3') self.build_model() def build_model(self): # reshape/rescale x self.images = (tf.reshape(self.x, shape=[self.batch_size, self.image_size, self.image_size, self.c_dim], name='x_reshaped') - 128) / 127. # create discriminator/encoder self.DzGEN, self.D_logits = self.discriminator(self.images, reuse=False) # create generator self.G = self.generator(self.DzGEN) # loss is now L2 distance between input image and generator output self.dzgen_loss = tf.reduce_mean(tf.square(self.G - self.images), name="loss_DzGEN") # debug self.summaries.append(image_summary("G", self.G, max_outputs=3)) self.summaries.append(image_summary("X", self.images, max_outputs=3)) self.summaries.append(histogram_summary("G_hist", self.G)) self.summaries.append(histogram_summary("X_hist", self.images)) self.summaries.append(scalar_summary("DzGen_loss", self.dzgen_loss)) # all trainable variables t_vars = tf.trainable_variables() # d variables self.d_vars = [var for var in t_vars if 'd_' in var.name] def discriminator(self, image, y=None, reuse=False): """ Create the discriminator This creates a string of layers: - input - [N, 64, 64, 3] - conv layer with 64 5x5 kernels and 2x2 stride - [N, 32, 32, 64] - leaky relu - [N, 32, 32, 64] - conv layer with 128 5x5 kernels and 2x2 stride - [N, 16, 16, 32] - batch norm - [N, 16, 16, 32] - leaky relu - [N, 16, 16, 32] - conv layer with 256 5x5 kernels and 2x2 stride - [N, 8, 8, 256] - batch norm - [N, 8, 8, 256] - leaky relu - [N, 8, 8, 256] - conv layer with 256 5x5 kernels and 2x2 stride - [N, 4, 4, 512] - batch norm - [N, 4, 4, 512] - leaky relu - [N, 4, 4, 512] - flatten - [N, 8192] - linear layer with 1 output neurons - [N, 1] - sigmoid - [N,1] Args: image: batch of input images - shape: [N, H, W, C] y: batch of one-hot encoded labels - shape: [N, K] reuse: whether to re-use previously created variables """ # NOTE: although we are really creating an encoder here we need to re-use the same # variable scope (i.e. 
"discriminator") as in the original GAN so we can re-use # learned parameters with tf.variable_scope("discriminator") as scope: if reuse: scope.reuse_variables() h0 = lrelu(conv2d(image, self.df_dim, name='d_h0_conv')) h1 = lrelu(self.d_bn1(conv2d(h0, self.df_dim*2, name='d_h1_conv'), train=self.is_training)) h2 = lrelu(self.d_bn2(conv2d(h1, self.df_dim*4, name='d_h2_conv'), train=self.is_training)) h3 = lrelu(self.d_bn3(conv2d(h2, self.df_dim*8, name='d_h3_conv'), train=self.is_training)) h3_size = ((self.output_size // 16) ** 2) * self.df_dim * 8 h4 = linear(tf.reshape(h3, [self.batch_size, h3_size]), self.z_dim, 'd_h3_lin_retrain') return h4, h4 def generator(self, z, y=None): """ Create the generator This creates a string of layers: - input - [N, 100] - linear layer with 8192 output neurons - [N, 8192] - reshape - [N, 4, 4, 512] - batch norm - [N, 4, 4, 512] - relu - [N, 4, 4, 512] - transpose convolution with 256 filters and stride 2 - [N, 8, 8, 256] - batch norm - [N, 8, 8, 256] - relu - [N, 8, 8, 256] - transpose convolution with 128 filters and stride 2 - [N, 16, 16, 128] - batch norm - [N, 16, 16, 128] - relu - [N, 16, 16, 128] - transpose convolution with 64 filters and stride 2 - [N, 32, 32, 64] - batch norm - [N, 32, 32, 64] - relu - [N, 32, 32, 64] - transpose convolution with 3 filters and stride 2 - [N, 64, 64, 3] - tanh - [N, 64, 64, 3] """ with tf.variable_scope("generator"): s = self.output_size s2, s4, s8, s16 = int(s/2), int(s/4), int(s/8), int(s/16) # project `z` and reshape self.z_, self.h0_w, self.h0_b = linear(z, self.gf_dim*8*s16*s16, 'g_h0_lin', with_w=True) self.h0 = tf.reshape(self.z_, [-1, s16, s16, self.gf_dim * 8]) h0 = tf.nn.relu(self.g_bn0(self.h0, train=False)) self.h1, self.h1_w, self.h1_b = deconv2d(h0, [self.batch_size, s8, s8, self.gf_dim*4], name='g_h1', with_w=True) h1 = tf.nn.relu(self.g_bn1(self.h1, train=False)) h2, self.h2_w, self.h2_b = deconv2d(h1, [self.batch_size, s4, s4, self.gf_dim*2], name='g_h2', with_w=True) h2 = tf.nn.relu(self.g_bn2(h2, train=False)) h3, self.h3_w, self.h3_b = deconv2d(h2, [self.batch_size, s2, s2, self.gf_dim*1], name='g_h3', with_w=True) h3 = tf.nn.relu(self.g_bn3(h3, train=False)) h4, self.h4_w, self.h4_b = deconv2d(h3, [self.batch_size, s, s, self.c_dim], name='g_h4', with_w=True) return tf.nn.tanh(h4)
ethantang95/DIGITS
examples/gan/network-celebA-encoder.py
Python
bsd-3-clause
16,138
[ "Gaussian" ]
0b9e75aeef2b10cdf920258bcf11c55cdf51a586c6c3193e5425a7984b8cfb4d
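An illustrative sketch composing the conv2d/lrelu/linear helpers defined in the DIGITS record above into a tiny discriminator-style head. Shapes and scope names here are made up for illustration and assume the helper definitions are in scope; this is not the network the record itself builds.

import tensorflow as tf

# Assumes conv2d, lrelu and linear from the file above are importable/in scope.
x = tf.placeholder(tf.float32, [None, 64, 64, 3], name='demo_x')

h0 = lrelu(conv2d(x, 64, name='demo_h0_conv'))     # [N, 32, 32, 64]
h1 = lrelu(conv2d(h0, 128, name='demo_h1_conv'))   # [N, 16, 16, 128]
flat = tf.reshape(h1, [-1, 16 * 16 * 128])
logit = linear(flat, 1, scope='demo_lin')          # [N, 1]
prob = tf.nn.sigmoid(logit)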
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

# This software (including its Debian packaging) is available to you under the terms of the GPL-3,
# see "/usr/share/common-licenses/GPL-3".
# Software is created and maintained by Laboratory of Biomolecular Systems Simulation at University of Gdansk.
# Contributors:
# - Tomasz Makarewicz (makson96@gmail.com)
#
# This tests returs GROMACS version detected by plugin and check if it equal $GROMACS_VER

from __future__ import print_function

import os
import shutil
import sys

import pymol_plugin_dynamics

print("######################")
print("Starting Basic Simulation test")
print("######################")

status, s_params = pymol_plugin_dynamics.init_function(travis_ci=True)

# Download molecule for tests
project_name = "2fjz"
s_params.change_project_name(project_name)
project_dir = pymol_plugin_dynamics.get_project_dirs(project_name)
shutil.copy("/usr/share/pdb-files/{}.pdb".format(project_name),
            "{}{}.pdb".format(project_dir, project_name))
s_params.create_cfg_files()

# Execute dynamics simulation
pymol_plugin_dynamics.dynamics(s_params)

if not os.path.isfile(project_dir + "2fjz_multimodel.pdb"):
    sys.exit(1)

print("######################")
print("Basic Simulation test finished successfully")
print("######################")
tomaszmakarewicz/Dynamics
tests/basic_simulation_test.py
Python
gpl-3.0
1,299
[ "Gromacs" ]
637d428c064d54adc01f83498f8a2c64bf9b35910f9f9e01679805902b886c12
""" This module loads all the classes from the VTK Charts library into its namespace. This is an optional module.""" import os if os.name == 'posix': from libvtkChartsPython import * else: from vtkChartsPython import *
jeffbaumes/jeffbaumes-vtk
Wrapping/Python/vtk/charts.py
Python
bsd-3-clause
230
[ "VTK" ]
e1b4d455a3ef95d62ef8d7a096c095b664064e3421b33d1b0e08ebf28a63ca06
import os import subprocess import jinja2 import json import openchemistry as oc def run_calculation(geometry_file, output_file, params, scratch_dir): # Read in the geometry from the geometry file # This container expects the geometry file to be in .xyz format with open(geometry_file) as f: xyz_structure = f.read() # remove the first two lines in the xyz file # (i.e. number of atom and optional comment) xyz_structure = xyz_structure.split('\n')[2:] xyz_structure = '\n '.join(xyz_structure) # Read the input parameters theory = params.get('theory', 'hf') task = params.get('task', 'energy') basis = params.get('basis', 'cc-pvdz') functional = params.get('functional', 'b3lyp') charge = params.get('charge', 0) multiplicity = params.get('multiplicity', 1) theory = theory.lower() if theory == 'hf': _theory = 'scf' # We update the multiplicity key when using scf. SCF accept names and # not numbers. multiplicities = {'1': 'singlet', '2': 'doublet', '3': 'triplet'} _multiplicity = multiplicities.get(str(multiplicity), 'singlet') else: _theory = theory _multiplicity = multiplicity task = task.lower() if task == 'frequencies': _task = 'task {0} {1}\ntask {0} {2}'.format(_theory, 'optimize', 'freq') elif task == 'optimize': _task = 'task {0} {1}'.format(_theory, 'optimize') else: # single point energy _task = 'task {0}'.format(_theory) context = { 'task': _task, 'theory': _theory, 'functional': functional, 'charge': charge, 'multiplicity': _multiplicity, 'basis': basis, } # Combine the input parameters and geometry into a concrete input file # that can be executed by the simulation code template_path = os.path.dirname(__file__) jinja2_env = \ jinja2.Environment(loader=jinja2.FileSystemLoader(template_path), trim_blocks=True) os.makedirs(scratch_dir, exist_ok=True) os.chdir(scratch_dir) raw_input_file = os.path.join(scratch_dir, 'raw.in') raw_output_file = os.path.join(scratch_dir, 'raw.json') with open(raw_input_file, 'wb') as f: if _theory == 'dft': jinja2_env.get_template('nwchem.in.j2').stream(**context, xyz_structure=xyz_structure).dump(f, encoding='utf8') else: jinja2_env.get_template('nwchem.sfc.in.j2').stream(**context, xyz_structure=xyz_structure).dump(f, encoding='utf8') # Execute the code and write to output cpus = 4 subprocess.run(['mpirun', '-np', str(cpus), "/opt/nwchem/bin/LINUX64/nwchem", raw_input_file, raw_output_file]) # Convert the raw output file generated by the code execution, into the # output format declared in the container description (cjson) with open(raw_output_file) as f: cjson = oc.NWChemJsonReader(f).read() # Save the calculation parameters in the cjson output for future reference cjson['inputParameters'] = params with open(output_file, 'w') as f: json.dump(cjson, f)
OpenChemistry/mongochemdeploy
docker/nwchem/src/run.py
Python
bsd-3-clause
3,174
[ "NWChem" ]
9241c7b368b588164a69f921b1746c4aa6730def0d53bd387f37f19cfff11675
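A hedged example of calling the run_calculation entry point defined in the NWChem container record above. The params keys (theory/task/basis/functional/charge/multiplicity) mirror the params.get calls in the record; the module name and file paths are placeholders.

# Sketch: invoke the container's run_calculation directly.
from run import run_calculation  # assumed import path for the file above

params = {
    'theory': 'dft',
    'functional': 'b3lyp',
    'basis': 'cc-pvdz',
    'task': 'optimize',
    'charge': 0,
    'multiplicity': 1,
}

run_calculation(
    geometry_file='/data/input.xyz',    # .xyz geometry expected by the reader
    output_file='/data/output.cjson',   # cjson written at the end
    params=params,
    scratch_dir='/tmp/nwchem_scratch',
)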
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

import os

from spack import *


class Gaussian(Package):
    """Gaussian is a computer program for computational chemistry"""

    homepage = "http://www.gaussian.com/"
    manual_download = True

    maintainers = ['antoniokaust']

    version('16-B.01', sha256='0b2cf60aa85d2c8c8e7547446e60e8e8cb67eec20e5f13c4a3e4e7616dcdf122')
    version('09-D.01', sha256='ef14885b5e334b6ec44a93bfd7225c634247dc946416af3087ab055bf05f54cd')

    @property
    def ver(self):
        return self.version.string.split('-')[0]

    @property
    def g_root(self):
        return join_path(self.prefix, 'g' + self.ver)

    @property
    def g_bsd(self):
        return join_path(self.g_root, 'bsd')

    def url_for_version(self, version):
        return "file://{0}/g{1}.tgz".format(os.getcwd(), version)

    def install(self, spec, prefix):
        install_tree('.', self.g_root)

    @run_after('install')
    def bsd_install(self):
        with working_dir(self.g_root):
            bsd_install = Executable(join_path('bsd', 'install'))
            bsd_install()

    def setup_run_environment(self, env):
        env.set('g' + self.ver + 'root', self.prefix)

        env.prepend_path('GAUSS_EXEDIR', self.g_root)
        env.prepend_path('GAUSS_EXEDIR', self.g_bsd)

        env.prepend_path('PATH', self.g_root)
        env.prepend_path('PATH', self.g_bsd)

        env.set('GAUSS_LEXEDIR', join_path(self.g_root, 'linda-exe'))
        env.set('GAUSS_ARCHDIR', join_path(self.g_root, 'arch'))
        env.set('GAUSS_BSDDIR', self.g_bsd)
        env.set('G' + self.ver + 'BASIS', join_path(self.g_root, 'basis'))

        env.prepend_path('LD_LIBRARY_PATH', self.g_root)
        env.prepend_path('LD_LIBRARY_PATH', self.g_bsd)
LLNL/spack
var/spack/repos/builtin/packages/gaussian/package.py
Python
lgpl-2.1
1,917
[ "Gaussian" ]
a980caed2785f46c711104c618baca85361aecfa324fbc79a426923a2e4e1f5c
import openmoltools
import mdtraj as md
from simtk.openmm import app
import simtk.openmm as mm
from simtk import unit as u
import openmmtools

temperature = 300. * u.kelvin
equil_friction = 5 / u.picoseconds
equil_timestep = 1 * u.femtoseconds
pressure = 1.0 * u.atmospheres
barostat_frequency = 25
discard_steps = 10000
n_steps = 500000
cutoff = 1.0 * u.nanometers
output_frequency = 500
n_monomers = 500

cas = "tip3p.xml"
ffxml_filename = "%s.xml" % cas
ff = app.ForceField(ffxml_filename)

pdb_filename = "./%s.pdb" % cas
box_pdb_filename = "./box.pdb"

monomer_pdb_filenames = [pdb_filename]
packed_trj = openmoltools.packmol.pack_box(monomer_pdb_filenames, [n_monomers])
packed_trj.save(box_pdb_filename)

out_pdb_filename = "./equil/equil.pdb"
final_step_pdb_filename = "./equil/equil_final_step.pdb"
dcd_filename = "./equil/equil.dcd"
log_filename = "./equil/equil.log"

topology = packed_trj.top.to_openmm(packed_trj)
positions = packed_trj.openmm_positions(0)

system = ff.createSystem(topology, nonbondedMethod=app.PME, nonbondedCutoff=cutoff, constraints=app.HBonds)

integrator = mm.LangevinIntegrator(temperature, equil_friction, equil_timestep / 10.)
system.addForce(mm.MonteCarloBarostat(pressure, temperature, barostat_frequency))

simulation = app.Simulation(topology, system, integrator)
simulation.context.setPositions(positions)

print('Minimizing...')
simulation.minimizeEnergy()

simulation.context.setVelocitiesToTemperature(temperature)
print('Equilibrating...')

simulation.step(discard_steps)  # Don't even save the first XXX ps
integrator.setStepSize(equil_timestep)

simulation.reporters.append(app.DCDReporter(dcd_filename, n_steps - 1))
simulation.reporters.append(app.PDBReporter(out_pdb_filename, n_steps - 1))
simulation.reporters.append(app.StateDataReporter(open(log_filename, 'w'), output_frequency, step=True, time=True, speed=True, density=True))
simulation.step(n_steps)

del simulation
del system

t = md.load(dcd_filename, top=out_pdb_filename)
t0 = t[-1]
t0.unitcell_lengths = t.unitcell_lengths.mean(0)
t0.save(out_pdb_filename)

del t
del t0

t = md.load(dcd_filename, top=out_pdb_filename)[-1]
t.save(final_step_pdb_filename)
choderalab/LiquidBenchmark
src/test_stepsize/old/equilibrate_water.py
Python
gpl-2.0
2,170
[ "MDTraj", "OpenMM" ]
54c134fff0c655cf1ef9d4f37382d287f483ec89c578d95fac02647a5dfb31c9
""" Simple Gaussian Naive Bayes Classification ------------------------------------------ Figure 9.2 A decision boundary computed for a simple data set using Gaussian naive Bayes classification. The line shows the decision boundary, which corresponds to the curve where a new point has equal posterior probability of being part of each class. In such a simple case, it is possible to find a classification with perfect completeness and contamination. This is rarely the case in the real world. """ # Author: Jake VanderPlas # License: BSD # The figure produced by this code is published in the textbook # "Statistics, Data Mining, and Machine Learning in Astronomy" (2013) # For more information, see http://astroML.github.com # To report a bug or issue, use the following forum: # https://groups.google.com/forum/#!forum/astroml-general import numpy as np from matplotlib import pyplot as plt from matplotlib import colors from sklearn.naive_bayes import GaussianNB #---------------------------------------------------------------------- # This function adjusts matplotlib settings for a uniform feel in the textbook. # Note that with usetex=True, fonts are rendered with LaTeX. This may # result in an error if LaTeX is not installed on your system. In that case, # you can set usetex to False. from astroML.plotting import setup_text_plots setup_text_plots(fontsize=8, usetex=True) #------------------------------------------------------------ # Simulate some data np.random.seed(0) mu1 = [1, 1] cov1 = 0.3 * np.eye(2) mu2 = [5, 3] cov2 = np.eye(2) * np.array([0.4, 0.1]) X = np.concatenate([np.random.multivariate_normal(mu1, cov1, 100), np.random.multivariate_normal(mu2, cov2, 100)]) y = np.zeros(200) y[100:] = 1 #------------------------------------------------------------ # Fit the Naive Bayes classifier clf = GaussianNB() clf.fit(X, y) # predict the classification probabilities on a grid xlim = (-1, 8) ylim = (-1, 5) xx, yy = np.meshgrid(np.linspace(xlim[0], xlim[1], 71), np.linspace(ylim[0], ylim[1], 81)) Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()]) Z = Z[:, 1].reshape(xx.shape) #------------------------------------------------------------ # Plot the results fig = plt.figure(figsize=(5, 3.75)) ax = fig.add_subplot(111) ax.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.binary, zorder=2) ax.contour(xx, yy, Z, [0.5], colors='k') ax.set_xlim(xlim) ax.set_ylim(ylim) ax.set_xlabel('$x$') ax.set_ylabel('$y$') plt.show()
kcavagnolo/astroML
book_figures/chapter9/fig_simple_naivebayes.py
Python
bsd-2-clause
2,509
[ "Gaussian" ]
5681743cc0234bdf19497a47c4a391e6c100e660445e056ee4ebe592922407be
from __future__ import absolute_import, division, print_function

# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------

from skbio.io import FileFormatError


class SequenceCollectionError(Exception):
    """General error for sequence collection validation failures."""
    pass


class StockholmParseError(FileFormatError):
    """Exception raised when a Stockholm formatted file cannot be parsed."""
    pass
JWDebelius/scikit-bio
skbio/alignment/_exception.py
Python
bsd-3-clause
711
[ "scikit-bio" ]
78aa499903b2e45ffd5b5ffa9b2797d463987ae6ee53edeae92582840ef4f1aa
#!/usr/bin/python # # Copyright 2015 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Coordinates a global build of a Spinnaker "release". The term "release" here is more of an encapsulated build. This is not an official release. It is meant for developers. The gradle script does not yet coordinate a complete build, so this script fills that gap for the time being. It triggers all the subsystem builds and then publishes the resulting artifacts. Publishing is typically to bintray for debian and redhat packages and a docker repository for containers. Usage: export BINTRAY_USER= export BINTRAY_KEY= # subject/repository are the specific bintray repository # owner and name components that specify the repository you are updating. # The repository must already exist, but can be empty. BINTRAY_REPOSITORY=subject/repository # cd <build root containing subsystem subdirectories> # this is where you ran refresh_source.sh from ./spinnaker/dev/build_release.sh --bintray_repo=$BINTRAY_REPOSITORY """ import argparse import base64 import collections import datetime import fnmatch import glob import json import os import multiprocessing import multiprocessing.pool import re import shlex import shutil import subprocess import sys import tempfile import threading import time import urllib2 from urllib2 import HTTPError import refresh_source from google.cloud import pubsub from spinnaker.run import run_quick SUBSYSTEM_LIST = ['clouddriver', 'orca', 'front50', 'echo', 'rosco', 'gate', 'igor', 'fiat', 'deck', 'spinnaker'] ADDITIONAL_SUBSYSTEMS = ['spinnaker-monitoring', 'halyard'] VALID_PLATFORMS = ['debian', 'redhat'] GCB_BUILD_STATUS_TIMEOUT = 1200 def determine_project_root(): return os.path.abspath(os.path.dirname(__file__) + '/..') def determine_modules_with_debians(gradle_root): files = glob.glob(os.path.join(gradle_root, '*', 'build', 'debian', 'control')) dirs = [os.path.dirname(os.path.dirname(os.path.dirname(file))) for file in files] if os.path.exists(os.path.join(gradle_root, 'build', 'debian', 'control')): dirs.append(gradle_root) return dirs def determine_modules_with_redhats(gradle_root): dirs = [] for dirname, subdirs, files in os.walk(gradle_root): for fname in files: if fnmatch.fnmatch(fname, '*.rpm'): dirs.append( os.path.dirname(os.path.dirname(dirname)) ) return dirs def determine_package_version(platform, gradle_root): if platform == 'debian': root = determine_modules_with_debians(gradle_root) if not root: return None with open(os.path.join(root[0], 'build', 'debian', 'control')) as f: content = f.read() match = re.search('(?m)^Version: (.*)', content) return match.group(1) elif platform == 'redhat': root = determine_modules_with_redhats(gradle_root) if not root: return None comp = os.path.basename(os.path.normpath(gradle_root)) build_root = os.getcwd() version_file = '{}-rpm-version.txt'.format(comp) version = open(version_file, 'r').read().rstrip() if re.match('-$', version): version = version + '0' return version def run_shell_and_log(cmd_list, logfile, cwd=None): for 
cmd in cmd_list: parsed = shlex.split(cmd) log = None if not os.path.exists(logfile): log = open(logfile, 'w') else: log = open(logfile, 'a') log.write('Executing command: {}\n---\n'.format(cmd)) subprocess.check_call(parsed, stdout=log, stderr=log, cwd=cwd) log.write('\n---\nFinished executing command: {}'.format(cmd)) if log: log.close() class Builder(object): """Knows how to coordinate a Spinnaker release.""" def __init__(self, options, build_number=None, container_builder=None): self.__package_list = [] self.__build_failures = [] self.__background_processes = [] os.environ['NODE_ENV'] = os.environ.get('NODE_ENV', 'dev') self.__build_number = build_number or os.environ.get('BUILD_NUMBER') or '{:%Y-%m-%d}'.format(datetime.datetime.utcnow()) self.__gcb_service_account = options.gcb_service_account self.__options = options if (container_builder and container_builder not in ['gcb', 'docker', 'gcb-trigger']): raise ValueError('Invalid container_builder. Must be empty, "gcb" or "docker"') self.refresher = refresh_source.Refresher(options) if options.bintray_repo and options.build: self.__verify_bintray() self.__project_dir = determine_project_root() def determine_gradle_root(self, name): if self.__options.platform == "debian": gradle_root = (name if name != 'spinnaker' else os.path.join(self.__project_dir, 'experimental/buildDeb')) gradle_root = name if name != 'spinnaker' else self.__project_dir return gradle_root def start_deb_build(self, name): """Start a subprocess to build and publish the designated component. This function runs a gradle 'candidate' task using the last git tag as the package version and the Bintray configuration passed through arguments. The 'candidate' task release builds the source, packages the debian and jar files, and publishes those to the respective Bintray '$org/$repository'. The naming of the gradle task is a bit unfortunate because of the terminology used in the Spinnaker product release process. The artifacts produced by this script are not 'release candidate' artifacts, they are pre-validation artifacts. Maybe we can modify the task name at some point. The gradle 'candidate' task throws a 409 if the package we are trying to publish already exists. We'll publish unique package versions using build numbers. These will be transparent to end users since the only meaningful version is the Spinnaker product version. We will use -Prelease.useLastTag=true and ensure the last git tag is the version we want to use. This tag has to be of the form 'X.Y.Z-$build' or 'vX.Y.Z-$build for gradle to use the tag as the version. This script will assume that the source has been properly tagged to use the latest tag as the package version for each component. Args: name [string]: Name of the subsystem repository. 
""" self.__debian_build(name, self.__options, self.__build_number, self.determine_gradle_root(name)) @classmethod def __debian_build(cls, name, options, build_number, gradle_root): jarRepo = options.jar_repo parts = options.bintray_repo.split('/') if len(parts) != 2: raise ValueError( 'Expected --bintray_repo to be in the form <owner>/<repo>') org, packageRepo = parts[0], parts[1] bintray_key = os.environ['BINTRAY_KEY'] bintray_user = os.environ['BINTRAY_USER'] if options.nebula: target = 'candidate' extra_args = [ '--stacktrace', '-Prelease.useLastTag=true', '-PbintrayPackageBuildNumber={number}'.format(number=build_number), '-PbintrayOrg="{org}"'.format(org=org), '-PbintrayPackageRepo="{repo}"'.format(repo=packageRepo), '-PbintrayJarRepo="{jarRepo}"'.format(jarRepo=jarRepo), '-PbintrayKey="{key}"'.format(key=bintray_key), '-PbintrayUser="{user}"'.format(user=bintray_user) ] else: target = 'buildDeb' extra_args = [] if options.info_gradle: extra_args.append('--info') if options.debug_gradle: extra_args.append('--debug') if options.gradle_cache_path: extra_args.append('--gradle-user-home={}'.format(options.gradle_cache_path)) if (not options.run_unit_tests or (name == 'deck' and not 'CHROME_BIN' in os.environ)): extra_args.append('-x test') if name == 'halyard': extra_args.append('-PbintrayPackageDebDistribution=trusty-nightly') cmds = [ './gradlew {extra} {target}'.format(extra=' '.join(extra_args), target=target) ] logfile = '{name}-debian-build.log'.format(name=name) if os.path.exists(logfile): os.remove(logfile) run_shell_and_log(cmds, logfile, cwd=gradle_root) def start_rpm_build(self, name): """Start a subprocess to build and publish the designated component. This function runs a gradle 'buildRpm' task using the last git tag as the package version and the Bintray configuration passed through arguments. The 'buildRpm' task release builds the source, packages the redhat and jar files, and publishes those to the respective Bintray '$org/$repository'. The naming of the gradle task is a bit unfortunate because of the terminology used in the Spinnaker product release process. The artifacts produced by this script are not 'release candidate' artifacts, they are pre-validation artifacts. Maybe we can modify the task name at some point. The gradle 'buildRpm' task throws a 409 if the package we are trying to publish already exists. We'll publish unique package versions using build numbers. These will be transparent to end users since the only meaningful version is the Spinnaker product version. We will use -Prelease.useLastTag=true and ensure the last git tag is the version we want to use. This tag has to be of the form 'X.Y.Z-$build' or 'vX.Y.Z-$build for gradle to use the tag as the version. This script will assume that the source has been properly tagged to use the latest tag as the package version for each component. Args: name [string]: Name of the subsystem repository. 
""" gradle_root = self.determine_gradle_root(name) self.__redhat_build(name, self.__options, self.__build_number, gradle_root) @classmethod def __redhat_build(cls, name, options, build_number, gradle_root): jarRepo = options.jar_repo parts = options.bintray_repo.split('/') if len(parts) != 2: raise ValueError( 'Expected --bintray_repo to be in the form <owner>/<repo>') org, packageRepo = parts[0], parts[1] bintray_key = os.environ['BINTRAY_KEY'] bintray_user = os.environ['BINTRAY_USER'] if options.nebula: target = 'buildRpm' extra_args = [ '--stacktrace', '-Prelease.useLastTag=true', '-PbintrayPackageBuildNumber={number}'.format(number=build_number) ] else: target = 'buildRpm' extra_args = [ '-PbintrayPackageBuildNumber={number}'.format(number=build_number) ] if options.debug_gradle: extra_args.append('--debug') if options.gradle_cache_path: extra_args.append('--gradle-user-home={}'.format(options.gradle_cache_path)) if (not options.run_unit_tests or (name == 'deck' and not 'CHROME_BIN' in os.environ)): extra_args.append('-x test') # Currently spinnaker is in a separate location cmds = [ './gradlew {extra} {target}'.format(extra=' '.join(extra_args), target=target) ] logfile = '{name}-rhel-build.log'.format(name=name) if os.path.exists(logfile): os.remove(logfile) run_shell_and_log(cmds, logfile, cwd=gradle_root) def start_container_build(self, name): """Start a subprocess to build a container image of the subsystem. Uses either Google Container Builder or Docker with configuration files produced during BOM generation to build the container images. The configuration files are assumed to be in the parent directory of the subsystem's Gradle root. Args: name [string]: Name of the subsystem repository. """ gradle_root = self.determine_gradle_root(name) if self.__options.container_builder == 'gcb': self.__gcb_build(name, gradle_root, self.__options.gcb_service_account, self.__options.gcb_project) elif self.__options.container_builder == 'gcb-trigger': self.__gcb_trigger_build(name, gradle_root, self.__options.gcb_service_account, self.__options.gcb_service_account_json, self.__options.gcb_project, self.__options.gcb_mirror_base_url) elif self.__options.container_builder == 'docker': self.__docker_build(name, gradle_root) else: raise NotImplemented( 'container_builder="{0}"'.format(self.__options.container_builder)) @classmethod def __gcb_build(cls, name, gradle_root, gcb_service_account, gcb_project): # Local .gradle dir stomps on GCB's .gradle directory when the gradle # wrapper is installed, so we need to delete the local one. # The .gradle dir is transient and will be recreated on the next gradle # build, so this is OK. gradle_cache = '{name}/.gradle'.format(name=name) if os.path.isdir(gradle_cache): # Tell rmtree to delete the directory even if it's non-empty. 
shutil.rmtree(gradle_cache) cmds = [ ('gcloud container builds submit --account={account} --project={project} --config"../{name}-gcb.yml"' .format(name=name, account=gcb_service_account, project=gcb_project)) ] logfile = '{name}-gcb-build.log'.format(name=name) if os.path.exists(logfile): os.remove(logfile) run_shell_and_log(cmds, logfile, cwd=gradle_root) @classmethod def __gcb_trigger_build(cls, name, gradle_root, gcb_service_account, gcb_service_account_json, gcb_project, mirror_base_url): logfile = '{name}-gcb-triggered-build.log'.format(name=name) tag = cls.__tag_gcb_mirror(name, mirror_base_url, gradle_root, logfile) subscription = cls.__configure_gcb_pubsub(name, gcb_service_account_json) cls.__listen_gcb_build_status(name, subscription, tag, gcb_project, gcb_service_account, logfile) @classmethod def __tag_gcb_mirror(cls, name, mirror_base_url, gradle_root, logfile): tag = run_quick('cat {name}-gcb-trigger.yml'.format(name=name), echo=False).stdout.strip() cmds = [ 'git remote add mirror {base_url}/{name}.git'.format(base_url=mirror_base_url, name=name), 'git fetch mirror', 'git checkout mirror/master', 'git merge origin/master', 'git push mirror master', 'git push mirror {tag}'.format(name=name, tag=tag) ] if os.path.exists(logfile): os.remove(logfile) run_shell_and_log(cmds, logfile, cwd=gradle_root) return tag @classmethod def __configure_gcb_pubsub(cls, name, gcb_service_account_json): pubsub_client = pubsub.Client.from_service_account_json(gcb_service_account_json) topic = pubsub_client.topic('cloud-builds') # GCB creates a topic named this automatically. print 'Creating subscription: cloud-builds-{}'.format(name) subscription = topic.subscription('cloud-builds-{name}'.format(name=name)) subscription.create() if not subscription.exists(): raise LookupError('GCB pubsub subscription creation for subscription id {} failed.'.format(subscription.name)) return subscription @classmethod def __listen_gcb_build_status(cls, name, subscription, tag, gcb_project, gcb_service_account, logfile): def fail_build(name): raise Exception('GCB triggered build for {} timed out'.format(name)) # Set an egg timer to fail. 
timer = threading.Timer(GCB_BUILD_STATUS_TIMEOUT, fail_build, (name)) timer.start() # Poll Google Cloud Pubsub for the build status completed = False try: while not completed: pulled = subscription.pull() for ack_id, message in pulled: comp_name = '' if name == 'spinnaker-monitoring': comp_name = 'monitoring-daemon' else: comp_name = name payload = json.loads(message.data) repo_name = payload['source']['repoSource']['repoName'] tag_name = payload['source']['repoSource']['tagName'] if repo_name == comp_name and tag_name == tag: subscription.acknowledge([ack_id]) status = payload['status'] print 'Received status: {} for building tag {} of {}'.format(status, tag_name, comp_name) if status in ['SUCCESS', 'FAILURE']: completed = True build_id = payload['id'] print 'Retrieving logs for build_id: {}'.format(build_id) get_log_cmd = ('gcloud container builds log --project {project} --account {account} {id}' .format(project=gcb_project, account=gcb_service_account, id=build_id)) build_log = run_quick(get_log_cmd, echo=False).stdout.strip() with open(logfile, 'a') as log: log.write('Fetching GCB build logs with: {}\n---\n'.format(get_log_cmd)) log.write(build_log) log.write('\n---\nFinished fetching GCB build logs') if status == 'FAILURE': raise Exception('Triggered GCB build for {name} failed.'.format(name=comp_name)) if not completed: time.sleep(10) finally: timer.cancel() subscription.delete() @classmethod def __docker_build(cls, name, gradle_root): docker_tag = run_quick('cat {name}-docker.yml', echo=False).stdout.strip() cmds = [ 'docker build -f Dockerfile -t {docker_tag} .'.format(name=name, docker_tag=docker_tag), 'docker push {docker_tag}'.format(name=name, docker_tag=docker_tag) ] logfile = '{name}-docker-build.log'.format(name=name) if os.path.exists(logfile): os.remove(logfile) run_shell_and_log(cmds, logfile, cwd=gradle_root) def publish_to_bintray(self, source, package, version, path, debian_tags=''): bintray_key = os.environ['BINTRAY_KEY'] bintray_user = os.environ['BINTRAY_USER'] parts = self.__options.bintray_repo.split('/') if len(parts) != 2: raise ValueError( 'Expected --bintray_repo to be in the form <owner>/<repo>') subject, repo = parts[0], parts[1] pkg_filename = os.path.basename(path) if (pkg_filename.startswith('spinnaker-') and not package.startswith('spinnaker')): package = 'spinnaker-' + package if debian_tags and debian_tags[0] != ';': debian_tags = ';' + debian_tags url = ('https://api.bintray.com/content' '/{subject}/{repo}/{package}/{version}/{path}' '{debian_tags}' ';publish=1;override=1' .format(subject=subject, repo=repo, package=package, version=version, path=path, debian_tags=debian_tags)) with open(source, 'r') as f: data = f.read() put_request = urllib2.Request(url) encoded_auth = base64.encodestring('{user}:{pwd}'.format( user=bintray_user, pwd=bintray_key))[:-1] # strip eoln put_request.add_header('Authorization', 'Basic ' + encoded_auth) put_request.get_method = lambda: 'PUT' try: result = urllib2.urlopen(put_request, data) except HTTPError as put_error: if put_error.code == 409 and self.__options.wipe_package_on_409: # The problem here is that BinTray does not allow packages to change once # they have been published (even though we are explicitly asking it to # override). PATCH wont work either. # Since we are building from source, we don't really have a version # yet, since we are still modifying the code. Either we need to generate a new # version number every time or we don't want to publish these. # Ideally we could control whether or not to publish. 
However, # if we do not publish, then the repository will not be visible without # credentials, and adding conditional credentials into the packer scripts # starts getting even more complex. # # We cannot seem to delete individual versions either (at least not for # InstallSpinnaker.sh, which is where this problem seems to occur), # so we'll be heavy handed and wipe the entire package. print 'Got 409 on {url}.'.format(url=url) delete_url = ('https://api.bintray.com/content' '/{subject}/{repo}/{path}' .format(subject=subject, repo=repo, path=path)) print 'Attempt to delete url={url} then retry...'.format(url=delete_url) delete_request = urllib2.Request(delete_url) delete_request.add_header('Authorization', 'Basic ' + encoded_auth) delete_request.get_method = lambda: 'DELETE' try: urllib2.urlopen(delete_request) print 'Deleted...' except HTTPError as ex: # Maybe it didn't exist. Try again anyway. print 'Delete {url} got {ex}. Try again anyway.'.format(url=url, ex=ex) print 'Retrying {url}'.format(url=url) result = urllib2.urlopen(put_request, data) print 'SUCCESS' elif put_error.code != 400: raise else: # Try creating the package and retrying. pkg_url = os.path.join('https://api.bintray.com/packages', subject, repo) print 'Creating an entry for {package} with {pkg_url}...'.format( package=package, pkg_url=pkg_url) # All the packages are from spinnaker so we'll hardcode it. # Note spinnaker-monitoring is a github repo with two packages. # Neither is "spinnaker-monitoring"; that's only the github repo. gitname = (package.replace('spinnaker-', '') if not package.startswith('spinnaker-monitoring') else 'spinnaker-monitoring') pkg_data = """{{ "name": "{package}", "licenses": ["Apache-2.0"], "vcs_url": "https://github.com/spinnaker/{gitname}.git", "website_url": "http://spinnaker.io", "github_repo": "spinnaker/{gitname}", "public_download_numbers": false, "public_stats": false }}'""".format(package=package, gitname=gitname) pkg_request = urllib2.Request(pkg_url) pkg_request.add_header('Authorization', 'Basic ' + encoded_auth) pkg_request.add_header('Content-Type', 'application/json') pkg_request.get_method = lambda: 'POST' pkg_result = urllib2.urlopen(pkg_request, pkg_data) pkg_code = pkg_result.getcode() if pkg_code >= 200 and pkg_code < 300: result = urllib2.urlopen(put_request, data) code = result.getcode() if code < 200 or code >= 300: raise ValueError('{code}: Could not add version to {url}\n{msg}' .format(code=code, url=url, msg=result.read())) print 'Wrote {source} to {url}'.format(source=source, url=url) def publish_install_script(self, source): gradle_root = self.determine_gradle_root('spinnaker') version = determine_package_version(self.__options.platform, gradle_root) self.publish_to_bintray(source, package='spinnaker', version=version, path='InstallSpinnaker.sh') def publish_file(self, source, package, version): """Write a file to the bintray repository. Args: source [string]: The path to the source to copy must be local. """ path = os.path.basename(source) debian_tags = '' if self.__options.platform == 'debian': debian_tags = ';'.join(['deb_component=spinnaker', 'deb_distribution=trusty,utopic,vivid,wily', 'deb_architecture=all']) self.publish_to_bintray(source, package=package, version=version, path=path, debian_tags=debian_tags) def start_copy_debian_target(self, name): """Copies the debian package for the specified subsystem. Args: name [string]: The name of the subsystem repository. 
""" pids = [] gradle_root = self.determine_gradle_root(name) version = determine_package_version(self.__options.platform, gradle_root) if version is None: return [] for root in determine_modules_with_debians(gradle_root): deb_dir = '{root}/build/distributions'.format(root=root) non_spinnaker_name = '{name}_{version}_all.deb'.format( name=name, version=version) if os.path.exists(os.path.join(deb_dir, 'spinnaker-' + non_spinnaker_name)): deb_file = 'spinnaker-' + non_spinnaker_name elif os.path.exists(os.path.join(deb_dir, non_spinnaker_name)): deb_file = non_spinnaker_name else: module_name = os.path.basename( os.path.dirname(os.path.dirname(deb_dir))) deb_file = '{module_name}_{version}_all.deb'.format( module_name=module_name, version=version) if not os.path.exists(os.path.join(deb_dir, deb_file)): error = ('.deb for name={name} version={version} is not in {dir}\n' .format(name=name, version=version, dir=deb_dir)) raise AssertionError(error) from_path = os.path.join(deb_dir, deb_file) print 'Adding {path}'.format(path=from_path) self.__package_list.append(from_path) basename = os.path.basename(from_path) module_name = basename[0:basename.find('_')] if self.__options.bintray_repo: self.publish_file(from_path, module_name, version) return pids def start_copy_redhat_target(self, name): """Copies the redhat package for the specified subsystem. Args: name [string]: The name of the subsystem repository. """ pids = [] gradle_root = self.determine_gradle_root(name) version = determine_package_version(self.__options.platform, gradle_root) if version is None: return [] for root in determine_modules_with_redhats(gradle_root): rpm_dir = '{root}/build/distributions'.format(root=root) non_spinnaker_name = '{name}-{version}.noarch.rpm'.format( name=name, version=version) if os.path.exists(os.path.join(rpm_dir, 'spinnaker-' + non_spinnaker_name)): rpm_file = 'spinnaker-' + non_spinnaker_name elif os.path.exists(os.path.join(rpm_dir, non_spinnaker_name)): rpm_file = non_spinnaker_name else: module_name = os.path.basename(os.path.dirname( os.path.dirname(rpm_dir))) rpm_file = '{module_name}-{version}.noarch.rpm'.format( module_name=module_name, version=version) if not os.path.exists(os.path.join(rpm_dir, rpm_file)): error = ('.rpm for name={name} version={version} is not in {dir}\n' .format(name=name, version=version, dir=rpm_dir)) raise AssertionError(error) from_path = os.path.join(rpm_dir, rpm_file) print 'Adding {path}'.format(path=from_path) self.__package_list.append(from_path) basename = os.path.basename(from_path) module_name = re.search("^(.*)-{}.noarch.rpm$".format(version), basename).group(1) if self.__options.bintray_repo: self.publish_file(from_path, module_name, version) return pids def __do_build(self, subsys): if self.__options.platform == 'debian': try: self.start_deb_build(subsys) except Exception as ex: print ex self.__build_failures.append(subsys) elif self.__options.platform == 'redhat': try: self.start_rpm_build(subsys) except Exception as ex: self.__build_failures.append(subsys) def __do_container_build(self, subsys): try: # HACK: Space out the container builds to address scalability concerns. full_subsystem_list = SUBSYSTEM_LIST + ADDITIONAL_SUBSYSTEMS time.sleep(2 * full_subsystem_list.index(subsys)) self.start_container_build(subsys) except Exception as ex: print ex self.__build_failures.append(subsys) def build_container_images(self): """Build the Spinnaker packages as container images. 
""" subsystems = [comp for comp in SUBSYSTEM_LIST if comp != 'spinnaker'] subsystems.append('spinnaker-monitoring') if self.__options.container_builder: weighted_processes = self.__options.cpu_ratio * multiprocessing.cpu_count() pool = multiprocessing.pool.ThreadPool( processes=int(max(1, weighted_processes))) pool.map(self.__do_container_build, subsystems) if self.__build_failures: if set(self.__build_failures).intersection(set(subsystems)): raise RuntimeError('Builds failed for {0!r}'.format( self.__build_failures)) else: print 'Ignoring errors on optional subsystems {0!r}'.format( self.__build_failures) return def build_packages(self): """Build all the Spinnaker packages.""" all_subsystems = [] all_subsystems.extend(SUBSYSTEM_LIST) all_subsystems.extend(ADDITIONAL_SUBSYSTEMS) if self.__options.build: # Build in parallel using half available cores # to keep load in check. weighted_processes = self.__options.cpu_ratio * multiprocessing.cpu_count() pool = multiprocessing.pool.ThreadPool( processes=int(max(1, weighted_processes))) pool.map(self.__do_build, all_subsystems) if self.__build_failures: if set(self.__build_failures).intersection(set(SUBSYSTEM_LIST)): raise RuntimeError('Builds failed for {0!r}'.format( self.__build_failures)) else: print 'Ignoring errors on optional subsystems {0!r}'.format( self.__build_failures) if self.__options.nebula: return # Do not choke if there is nothing to copy wait_on = set(all_subsystems).difference(set(self.__build_failures)) if len(wait_on) > 0: pool = multiprocessing.pool.ThreadPool(processes=len(wait_on)) print 'Copying packages...' pool.map(self.__do_copy, wait_on) else: print 'Nothing to copy.' return def __do_copy(self, subsys): print 'Starting to copy {0}...'.format(subsys) if self.__options.platform == 'debian': pids = self.start_copy_debian_target(subsys) elif self.__options.platform == 'redhat': pids = self.start_copy_redhat_target(subsys) for p in pids: p.check_wait() print 'Finished copying {0}.'.format(subsys) @classmethod def init_argument_parser(cls, parser): refresh_source.Refresher.init_argument_parser(parser) parser.add_argument('--platform', default='debian', action='store', help='Select which platform to build for.' ' Valid options are: {}.'.format( ', '.join(VALID_PLATFORMS))) parser.add_argument('--build', default=True, action='store_true', help='Build the sources.') parser.add_argument('--info_gradle', default=False, action='store_true', help='Run gradle with --info.') parser.add_argument('--debug_gradle', default=False, action='store_true', help='Run gradle with --debug.') parser.add_argument( '--cpu_ratio', type=float, default=1.25, # 125% help='Number of concurrent threads as ratio of available cores.') parser.add_argument('--nobuild', dest='build', action='store_false') config_path = os.path.join(determine_project_root(), 'config') parser.add_argument( '--config_source', default=config_path, help='Path to directory for release config file templates.') parser.add_argument('--release_path', default='', help='Specifies the path to the release to build.' ' The release name is assumed to be the basename.' 
' The path can be a directory, GCS URI or S3 URI.') parser.add_argument( '--gcb_project', default='', help='The google project id to publish containers to' 'if the container builder is gcp.') parser.add_argument( '--bintray_repo', default='', help='Publish to this bintray repo.\n' 'This requires BINTRAY_USER and BINTRAY_KEY are set.') parser.add_argument( '--jar_repo', default='', help='Publish produced jars to this repo.\n' 'This requires BINTRAY_USER and BINTRAY_KEY are set.') parser.add_argument( '--wipe_package_on_409', default=False, action='store_true', help='Work around BinTray conflict errors by deleting the entire package' ' and retrying. Removes all prior versions so only intended for dev' ' repos.\n') parser.add_argument( '--nowipe_package_on_409', dest='wipe_package_on_409', action='store_false') parser.add_argument( '--nebula', default=True, action='store_true', help='Use nebula to build "candidate" target and upload to bintray.') parser.add_argument( '--nonebula', dest='nebula', action='store_false', help='Explicitly "buildDeb" then curl upload them to bintray.') parser.add_argument( '--gcb_service_account', default='', help='Google service account to invoke the gcp container builder with.') parser.add_argument( '--gcb_service_account_json', default='', help='Path to service account credentials to invoke the gcp container builder with.') parser.add_argument( '--gcb_mirror_base_url', default='git@github.com:spinnaker-release', help='Base URL for the Spinnaker repositories GCB is configured to trigger builds from. Must use SSH protocol.') parser.add_argument( '--gradle_cache_path', default='{home}/.gradle'.format(home=os.environ.get('HOME', '')), help='Path to a gradle cache directory to use for the builds.') parser.add_argument( '--run_unit_tests', type=bool, default=False, help='Run unit tests during build for all components other than Deck.') def __verify_bintray(self): if not os.environ.get('BINTRAY_KEY', None): raise ValueError('BINTRAY_KEY environment variable not defined') if not os.environ.get('BINTRAY_USER', None): raise ValueError('BINTRAY_USER environment variable not defined') @classmethod def do_build(cls, options, build_number=None, container_builder=None): if options.build and not (options.bintray_repo): sys.stderr.write('ERROR: Missing a --bintray_repo') return -1 if options.platform not in VALID_PLATFORMS: sys.stderr.write('ERROR: {} is an invalid --platform. Please us one of {}' .format(options.platform, ', '.join(VALID_PLATFORMS))) return -1 builder = cls(options, build_number=build_number, container_builder=container_builder) if options.pull_origin: builder.refresher.pull_all_from_origin() builder.build_packages() if container_builder: builder.build_container_images() if options.build and options.bintray_repo: fd, temp_path = tempfile.mkstemp() with open(os.path.join(determine_project_root(), 'InstallSpinnaker.sh'), 'r') as f: content = f.read() match = re.search( 'REPOSITORY_URL="https://dl\.bintray\.com/(.+)"', content) content = ''.join([content[0:match.start(1)], options.bintray_repo, content[match.end(1):]]) os.write(fd, content) os.close(fd) try: builder.publish_install_script( os.path.join(determine_project_root(), temp_path)) finally: os.remove(temp_path) print '\nFINISHED writing release to {rep}'.format( rep=options.bintray_repo) @classmethod def main(cls): parser = argparse.ArgumentParser() cls.init_argument_parser(parser) options = parser.parse_args() cls.do_build(options) if __name__ == '__main__': sys.exit(Builder.main())
jtk54/spinnaker
dev/build_release.py
Python
apache-2.0
37,041
[ "ORCA" ]
e8facbbc97eba2f857f1f1520d1148ed1482903bd0531f0948954ffeae32f7f4
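The build_release.py entry above publishes each package by PUTting the file to Bintray's content API at /content/{subject}/{repo}/{package}/{version}/{path} with ";publish=1;override=1" appended and HTTP Basic auth taken from BINTRAY_USER/BINTRAY_KEY. Below is a minimal sketch of that upload step only; it assumes the third-party requests library (the script itself uses urllib2), uses placeholder subject/repo/package/version values, and omits the script's 409 wipe-and-retry and package-creation fallbacks.

# Minimal sketch of the Bintray content-API upload performed by publish_to_bintray().
# Assumes the `requests` package is available; all repository values are placeholders.
import os
import requests

def upload_to_bintray(source_path, subject, repo, package, version, remote_path):
    user = os.environ["BINTRAY_USER"]
    key = os.environ["BINTRAY_KEY"]
    url = ("https://api.bintray.com/content/"
           "{subject}/{repo}/{package}/{version}/{path};publish=1;override=1"
           .format(subject=subject, repo=repo, package=package,
                   version=version, path=remote_path))
    with open(source_path, "rb") as f:
        # Basic auth + streamed PUT of the artifact body.
        resp = requests.put(url, data=f, auth=(user, key))
    # A 409 here is the conflict the script's --wipe_package_on_409 option works around.
    resp.raise_for_status()
    return resp.status_code

# Example with hypothetical values:
# upload_to_bintray("InstallSpinnaker.sh", "my-org", "my-repo",
#                   "spinnaker", "1.0.0-123", "InstallSpinnaker.sh")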
# Authors: Aaron Qiu <zqiu@ulg.ac.be>,
#          Antonio Sutera <a.sutera@ulg.ac.be>,
#          Arnaud Joly <a.joly@ulg.ac.be>,
#          Gilles Louppe <g.louppe@ulg.ac.be>,
#          Vincent Francois <v.francois@ulg.ac.be>
#
# License: BSD 3 clause

from __future__ import division, print_function, absolute_import

from itertools import chain

import numpy as np
from sklearn.externals.joblib import Parallel, delayed, cpu_count

from utils import scale


def _partition_X(X, n_jobs):
    """Private function used to partition X between jobs."""
    n_nodes = X.shape[1]

    # Compute the number of jobs
    n_jobs = min(cpu_count() if n_jobs == -1 else n_jobs, n_nodes)

    # Partition estimators between jobs
    n_node_per_job = (n_nodes // n_jobs) * np.ones(n_jobs, dtype=np.int)
    n_node_per_job[:n_nodes % n_jobs] += 1
    starts = np.cumsum(n_node_per_job)

    return n_jobs, [0] + starts.tolist()


def _parallel_count(X, start, end):
    """Private function used to compute a batch of score within a job."""
    count = np.zeros((end - start, X.shape[1]))
    for index, jx in enumerate(range(start, end)):
        X_jx_bot = X[:-1, jx] + 0.2
        X_jx_top = X[:-1, jx] + 0.5
        for j in range(X.shape[1]):
            if j == jx:
                continue
            count[index, j] = ((X[1:, j] > X_jx_bot) &
                               (X[1:, j] < X_jx_top)).sum()
    return count


def make_prediction_directivity(X, threshold=0.12, n_jobs=1):
    """Score neuron connectivity using a precedence measure

    Parameters
    ----------
    X : numpy array of shape (n_samples, n_nodes)
        Fluorescence signals

    threshold : float, (default=0.12)
        Threshold value for hard thresholding filter:
        x_new[i] = x[i] if x[i] >= threshold else 0.

    n_jobs : integer, optional (default=1)
        The number of jobs to run the algorithm in parallel. If -1, then the
        number of jobs is set to the number of cores.

    Returns
    -------
    score : numpy array of shape (n_nodes, n_nodes)
        Pairwise neuron connectivity score.
    """
    # Perform filtering
    X_new = np.zeros((X.shape))
    for i in range(1, X.shape[0] - 1):
        for j in range(X.shape[1]):
            X_new[i, j] = (X[i, j] + 1 * X[i - 1, j] + 0.8 * X[i - 2, j] +
                           0.4 * X[i - 3, j])
    X_new = np.diff(X_new, axis=0)
    thresh1 = X_new < threshold * 1
    thresh2 = X_new >= threshold * 1
    X_new[thresh1] = 0
    X_new[thresh2] = pow(X_new[thresh2], 0.9)

    # Score directivity
    n_jobs, starts = _partition_X(X, n_jobs)
    all_counts = Parallel(n_jobs=n_jobs)(
        delayed(_parallel_count)(X_new, starts[i], starts[i + 1])
        for i in range(n_jobs))
    count = np.vstack(list(chain.from_iterable(all_counts)))

    return scale(count - np.transpose(count))
orlandi/connectomicsPerspectivesPaper
participants_codes/aaagv/directivity.py
Python
mit
2,853
[ "NEURON" ]
93f4961c66dc4f184c288f4b85dc79a755619afd745bf43cb2f447235b8f1d54
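The directivity.py entry above scores a directed connection i -> j by counting how often node j's next-frame (filtered) fluorescence lands in a window 0.2 to 0.5 above node i's current value. The snippet below is a self-contained toy illustration of that windowed precedence count (the core of _parallel_count), without the joblib parallelism or the utils.scale post-processing; the random input is placeholder data.

# Toy illustration of the windowed precedence count used in _parallel_count():
# count[i, j] = number of time steps t where X[t+1, j] lies in (X[t, i] + 0.2, X[t, i] + 0.5).
import numpy as np

def precedence_count(X):
    n_samples, n_nodes = X.shape
    count = np.zeros((n_nodes, n_nodes))
    for i in range(n_nodes):
        lo = X[:-1, i] + 0.2   # lower edge of the window, per time step
        hi = X[:-1, i] + 0.5   # upper edge of the window
        for j in range(n_nodes):
            if j == i:
                continue
            count[i, j] = ((X[1:, j] > lo) & (X[1:, j] < hi)).sum()
    return count

rng = np.random.RandomState(0)
X = rng.rand(100, 5)           # 100 time steps, 5 nodes of toy "fluorescence"
count = precedence_count(X)
score = count - count.T        # the script then rescales this with utils.scale()
print(score.shape)             # (5, 5)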
ref = []
ref.append(['AMBER_apo', 'truncated.pdb', 'Crystal'])
ref.append(['AMBER_apo', 'Avg_structure/021.150.avg_structure.pdb', 'Average Structure 021 - 150'])
rbdavid/RMSD_analyses
PCA_RMSD_Multi_Ref/ref_list.py
Python
gpl-3.0
163
[ "CRYSTAL" ]
67ddd114c0fa3b8a5345d29816e88ddcdbf4ec1e64cd9599ba5ddc248cadddcc
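The ref_list.py entry above only defines a list of [system label, reference PDB path, description] triples for the repository's RMSD scripts. A hypothetical way to iterate it is sketched below; the loop body is a placeholder, not the repo's actual analysis code.

# Hypothetical consumer of the reference list defined in ref_list.py.
ref = []
ref.append(['AMBER_apo', 'truncated.pdb', 'Crystal'])
ref.append(['AMBER_apo', 'Avg_structure/021.150.avg_structure.pdb',
            'Average Structure 021 - 150'])

for system_label, pdb_path, description in ref:
    # Placeholder for the real per-reference RMSD calculation.
    print('{}: compute RMSD against {} ({})'.format(system_label, pdb_path, description))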
''' Feed-Foward Artificial Neural Network ------------------------------------- Feed-foward nets are the simplest NN's to master. They are comprised of an input layer, one or more hidden layers and an output layer. Since the dimensionality of out data is 2 inputs to 1 output, there will be 2 input neruons and a single output neuron. For sake of simplicty this net will restrict itself to a single hidden layer (deep belief networks can be for another time). This model revolves around on estimating your SAT score based on the amount of hours you slept and the amount of hours you studied the night before. For more information including the theory papers for the algorthms behind the backpropagation refer to the user manual. This software does requires 2 dependencies: > Numpy Library (https://docs.scipy.org/doc/numpy-1.13.0/user/install.html) > Scipy Library (https://www.scipy.org/install.html) Python Version: 3.6 28.07.2017 | Oakhill College | SDD | Open Source Software (C) | Lucas Barbosa ''' # dependencies for operation import sys import numpy as np from scipy import optimize class Neural_Network(object): def __init__(self, learning_rate=0): # define hyperparameters self.input_layer_size = 2 self.hidden_layer_size = 3 self.output_layer_size = 1 # define parameters self.W1 = np.random.randn(self.input_layer_size, self.hidden_layer_size) self.W2 = np.random.randn(self.hidden_layer_size, self.output_layer_size) # regularization parameter self.learning_rate = learning_rate # forward propagation def forward(self, X): self.z2 = np.dot(X, self.W1) self.a2 = self.sigmoid(self.z2) self.z3 = np.dot(self.a2, self.W2) prediction = self.sigmoid(self.z3) return prediction # activation functions def sigmoid(self, z): return 1 / (1 + np.exp(-z)) # derivative of sigmoid function def sigmoid_prime(self, z): return np.exp(-z) / ((1 + np.exp(-z))**2) # efficient backprop def cost_function(self, X, desired_output): self.prediction = self.forward(X) total_error = ((1/2) * sum((desired_output - self.prediction)**2)) / X.shape[0] + \ (self.learning_rate / 2) * (np.sum(self.W1**2) + np.sum(self.W2**2)) return total_error def cost_function_prime(self, X, desired_y): self.prediction = self.forward(X) # layer 3 backprop error l3_backprop_error = np.multiply(-(desired_y - self.prediction), \ self.sigmoid_prime(self.z3)) # divide by X.shape[0] to account for the scale of the data cost_in_terms_of_W2 = np.dot(self.a2.T, l3_backprop_error) / X.shape[0] + \ (self.learning_rate * self.W2) # layer 2 backprop error l2_backprop_error = np.dot(l3_backprop_error, self.W2.T) * \ self.sigmoid_prime(self.z2) # divide by X.shape[0] to account for the scale of the data cost_in_terms_of_W1 = np.dot(X.T, l2_backprop_error) / X.shape[0] + \ (self.learning_rate * self.W1) return cost_in_terms_of_W1, cost_in_terms_of_W2 # altering and setting the parameters during training def get_params(self): # get W1 & W2 rolled into a vector params = np.concatenate((self.W1.ravel(), self.W2.ravel())) return params def set_params(self, params): # set W1 & W2 using single parameter vector W1_start = 0 W1_end = self.hidden_layer_size * self.input_layer_size # reshape the W1 weights self.W1 = np.reshape(params[W1_start : W1_end], \ (self.input_layer_size, self.hidden_layer_size)) W2_end = W1_end + self.hidden_layer_size * self.output_layer_size # reshape the W2 weights self.W2 = np.reshape(params[W1_end : W2_end], \ (self.hidden_layer_size, self.output_layer_size)) def compute_gradient(self, X, desired_y): cost_in_terms_of_W1, cost_in_terms_of_W2 = 
self.cost_function_prime(X, desired_y) return np.concatenate((cost_in_terms_of_W1.ravel(), cost_in_terms_of_W2.ravel())) class Helper(object): def __init__(self, Local_Ref): # set a local reference to NN class self.Local_Ref = Local_Ref # normalize data to account for different units def scale_data(self, hours, test_score): MAX_SCORE = 100. hours /= np.amax(hours, axis=0) test_score /= MAX_SCORE return hours, test_score # print out the results of the NN's predicitons def print_predictions(self, train_x, train_y): print("="*50) print("Expected Scores:") for i in range(0, len(train_y)): print(int(train_y[i] * 100), "/100", sep="") print("="*50) predictions = NN.forward(train_x) print("Predicted Scores:") for i in range(0, len(train_x)): print(int(predictions[i] * 100), "/100", sep="") print("="*50) # checking gradients with numerical gradient computation avoiding logic errors def compute_numerical_gradient(self, X, desired_y): initial_params = self.Local_Ref.get_params() numerical_gradient = np.zeros(initial_params.shape) perturb = np.zeros(initial_params.shape) # epsilon value needs to be small enough act as a 'zero' epsilon = 1e-4 for i in range(len(initial_params)): # set perturbation vector to alter the original state of the initial params perturb[i] = epsilon self.Local_Ref.set_params(initial_params + perturb) loss_2 = self.Local_Ref.cost_function(X, desired_y) self.Local_Ref.set_params(initial_params - perturb) loss_1 = self.Local_Ref.cost_function(X, desired_y) # computer numerical gradient numerical_gradient[i] = (loss_2 - loss_1) / (2 * epsilon) perturb[i] = 0 self.Local_Ref.set_params(initial_params) return numerical_gradient class Trainer(object): def __init__(self, Local_Ref): # make local reference to NN self.Local_Ref = Local_Ref def cost_function_wrapper(self, params, X, desired_y): self.Local_Ref.set_params(params) total_cost = self.Local_Ref.cost_function(X, desired_y) gradient = self.Local_Ref.compute_gradient(X, desired_y) return total_cost, gradient # track cost function value as training progresses def callback(self, params): self.Local_Ref.set_params(params) self.cost_list.append(self.Local_Ref.cost_function(self.train_x, self.train_y)) self.test_cost_list.append(self.Local_Ref.cost_function(self.test_x, self.test_y)) def train(self, train_x, train_y, test_x, test_y): # internal variable for callback function self.train_x = train_x self.train_y = train_y self.test_x = test_x self.test_y = test_y # empty lists to store costs self.cost_list = [] self.test_cost_list = [] initial_params = self.Local_Ref.get_params() # using scipy's built in Quasi-Newton BFGS mathematical optimization algorithm options = {"maxiter": 200, "disp": True} _result = optimize.minimize(self.cost_function_wrapper, initial_params, jac=True, \ method="BFGS", args=(train_x, train_y), options=options, \ callback=self.callback) # once the training is complete finally set the new values of the parameters in self.Local_Ref.set_params(_result.x) self.optimization_results = _result if __name__ == "__main__": # check if numpy and scipy are installed before running any code if "numpy" not in sys.modules or "scipy" not in sys.modules: raise AssertionError("The required dependencies have not been imported.") # training data train_x = np.array(([3,5],[5,1],[10,2],[6,1.5]), dtype=float) train_y = np.array(([75],[82],[93],[70]), dtype=float) # testing data test_x = np.array(([4, 5.5],[4.5, 1],[9,2.5],[6,2]), dtype=float) test_y = np.array(([70],[89],[85],[75]), dtype=float) # initialize all the classes NN = 
Neural_Network(learning_rate=0.0001) Aux = Helper(NN) T1 = Trainer(NN) # normalize data train_x, train_y = Aux.scale_data(train_x, train_y) test_x, test_y = Aux.scale_data(test_x, test_y) # check to see gradients have been correctly calculated numerical_gradient = Aux.compute_numerical_gradient(train_x, train_y) computed_gradient = NN.compute_gradient(train_x, train_y) # train the network T1.train(train_x, train_y, test_x, test_y) # observe the results of the tests on above datasets Aux.print_predictions(train_x, train_y)
lucasbrsa/MLANN-1
src/feed_forward_net.py
Python
gpl-3.0
9,071
[ "NEURON" ]
677425804ef03920f6c4d30b9089fcab8b22f25bb63566b00bfaad7ddb82b159
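The feed_forward_net.py entry above builds a 2-3-1 network whose forward pass is z2 = X.W1, a2 = sigmoid(z2), z3 = a2.W2, prediction = sigmoid(z3). Here is a minimal NumPy sketch of just that forward pass with random weights and the same toy hours-slept/hours-studied inputs; no training is performed, so the printed predictions are meaningless placeholders.

# Minimal sketch of the 2-3-1 forward pass described in the Neural_Network class.
import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

rng = np.random.RandomState(42)
W1 = rng.randn(2, 3)   # input layer (2) -> hidden layer (3)
W2 = rng.randn(3, 1)   # hidden layer (3) -> output layer (1)

# Toy inputs: [hours slept, hours studied], scaled to [0, 1] as the script's Helper does.
X = np.array([[3, 5], [5, 1], [10, 2]], dtype=float)
X = X / np.amax(X, axis=0)

a2 = sigmoid(np.dot(X, W1))        # hidden-layer activations
yhat = sigmoid(np.dot(a2, W2))     # predicted (normalized) SAT scores
print(yhat * 100)                  # back to the 0-100 score scale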
""" TaskQueueDB class is a front-end to the task queues db """ from __future__ import absolute_import from __future__ import division from __future__ import print_function __RCSID__ = "$Id" import six import random import string from DIRAC import gConfig, S_OK, S_ERROR from DIRAC.Core.Base.DB import DB from DIRAC.Core.Utilities import List from DIRAC.Core.Utilities.PrettyPrint import printDict from DIRAC.Core.Utilities.DictCache import DictCache from DIRAC.Core.Security import Properties from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations from DIRAC.ConfigurationSystem.Client.Helpers import Registry from DIRAC.WorkloadManagementSystem.private.SharesCorrector import SharesCorrector DEFAULT_GROUP_SHARE = 1000 TQ_MIN_SHARE = 0.001 # For checks at insertion time, and not only singleValueDefFields = ('OwnerDN', 'OwnerGroup', 'Setup', 'CPUTime') multiValueDefFields = ('Sites', 'GridCEs', 'BannedSites', 'Platforms', 'SubmitPools', 'JobTypes', 'Tags') # Used for matching multiValueMatchFields = ('GridCE', 'Site', 'Platform', 'SubmitPool', 'JobType', 'Tag') bannedJobMatchFields = ('Site', ) mandatoryMatchFields = ('Setup', 'CPUTime') priorityIgnoredFields = ('Sites', 'BannedSites') def _lowerAndRemovePunctuation(s): if six.PY3: table = str.maketrans("", "", string.punctuation) # pylint: disable=no-member return s.lower().translate(table) else: return s.lower().translate(None, string.punctuation) class TaskQueueDB(DB): """ MySQL DB of "Task Queues" """ def __init__(self): DB.__init__(self, 'TaskQueueDB', 'WorkloadManagement/TaskQueueDB') self.__maxJobsInTQ = 5000 self.__defaultCPUSegments = [6 * 60, 30 * 60, 1 * 3600, 6 * 3600, 12 * 3600, 1 * 86400, 2 * 86400, 3 * 86400, 4 * 86400, 6 * 86400, 8 * 86400, 10 * 86400, int(12.5 * 86400)] self.__maxMatchRetry = 3 self.__jobPriorityBoundaries = (0.001, 10) self.__groupShares = {} self.__deleteTQWithDelay = DictCache(self.__deleteTQIfEmpty) self.__opsHelper = Operations() self.__ensureInsertionIsSingle = False self.__sharesCorrector = SharesCorrector(self.__opsHelper) result = self.__initializeDB() if not result['OK']: raise Exception("Can't create tables: %s" % result['Message']) def enableAllTaskQueues(self): """ Enable all Task queues """ return self.updateFields("tq_TaskQueues", updateDict={"Enabled": "1"}) def findOrphanJobs(self): """ Find jobs that are not in any task queue """ result = self._query("select JobID from tq_Jobs WHERE TQId not in (SELECT TQId from tq_TaskQueues)") if not result['OK']: return result return S_OK([row[0] for row in result['Value']]) def isSharesCorrectionEnabled(self): return self.__getCSOption("EnableSharesCorrection", False) def __getCSOption(self, optionName, defValue): return self.__opsHelper.getValue("JobScheduling/%s" % optionName, defValue) def getValidPilotTypes(self): return self.__getCSOption("AllPilotTypes", ['private']) def __initializeDB(self): """ Create the tables """ result = self._query("show tables") if not result['OK']: return result tablesInDB = [t[0] for t in result['Value']] tablesToCreate = {} self.__tablesDesc = {} self.__tablesDesc['tq_TaskQueues'] = {'Fields': {'TQId': 'INTEGER(11) UNSIGNED AUTO_INCREMENT NOT NULL', 'OwnerDN': 'VARCHAR(255) NOT NULL', 'OwnerGroup': 'VARCHAR(32) NOT NULL', 'Setup': 'VARCHAR(32) NOT NULL', 'CPUTime': 'BIGINT(20) UNSIGNED NOT NULL', 'Priority': 'FLOAT NOT NULL', 'Enabled': 'TINYINT(1) NOT NULL DEFAULT 0' }, 'PrimaryKey': 'TQId', 'Indexes': {'TQOwner': ['OwnerDN', 'OwnerGroup', 'Setup', 'CPUTime'] } } self.__tablesDesc['tq_Jobs'] = {'Fields': 
{'TQId': 'INTEGER(11) UNSIGNED NOT NULL', 'JobId': 'INTEGER(11) UNSIGNED NOT NULL', 'Priority': 'INTEGER UNSIGNED NOT NULL', 'RealPriority': 'FLOAT NOT NULL' }, 'PrimaryKey': 'JobId', 'Indexes': {'TaskIndex': ['TQId']}, 'ForeignKeys': {'TQId': 'tq_TaskQueues.TQId'} } for multiField in multiValueDefFields: tableName = 'tq_TQTo%s' % multiField self.__tablesDesc[tableName] = {'Fields': {'TQId': 'INTEGER(11) UNSIGNED NOT NULL', 'Value': 'VARCHAR(64) NOT NULL' }, 'PrimaryKey': ['TQId', 'Value'], 'Indexes': {'TaskIndex': ['TQId'], '%sIndex' % multiField: ['Value']}, 'ForeignKeys': {'TQId': 'tq_TaskQueues.TQId'} } for tableName in self.__tablesDesc: if tableName not in tablesInDB: tablesToCreate[tableName] = self.__tablesDesc[tableName] return self._createTables(tablesToCreate) def getGroupsInTQs(self): cmdSQL = "SELECT DISTINCT( OwnerGroup ) FROM `tq_TaskQueues`" result = self._query(cmdSQL) if not result['OK']: return result return S_OK([row[0] for row in result['Value']]) def fitCPUTimeToSegments(self, cpuTime): """ Fit the CPU time to the valid segments """ maxCPUSegments = self.__getCSOption("taskQueueCPUTimeIntervals", self.__defaultCPUSegments) try: maxCPUSegments = [int(seg) for seg in maxCPUSegments] # Check segments in the CS last = 0 for cpuS in maxCPUSegments: if cpuS <= last: maxCPUSegments = self.__defaultCPUSegments break last = cpuS except Exception: maxCPUSegments = self.__defaultCPUSegments # Map to a segment for cpuSegment in maxCPUSegments: if cpuTime <= cpuSegment: return cpuSegment return maxCPUSegments[-1] def _checkTaskQueueDefinition(self, tqDefDict): """ Check a task queue definition dict is valid """ for field in singleValueDefFields: if field not in tqDefDict: return S_ERROR("Missing mandatory field '%s' in task queue definition" % field) if field in ["CPUTime"]: if not isinstance(tqDefDict[field], six.integer_types): return S_ERROR("Mandatory field %s value type is not valid: %s" % (field, type(tqDefDict[field]))) else: if not isinstance(tqDefDict[field], six.string_types): return S_ERROR("Mandatory field %s value type is not valid: %s" % (field, type(tqDefDict[field]))) result = self._escapeString(tqDefDict[field]) if not result['OK']: return result tqDefDict[field] = result['Value'] for field in multiValueDefFields: if field not in tqDefDict: continue if not isinstance(tqDefDict[field], (list, tuple)): return S_ERROR("Multi value field %s value type is not valid: %s" % (field, type(tqDefDict[field]))) result = self._escapeValues(tqDefDict[field]) if not result['OK']: return result tqDefDict[field] = result['Value'] return S_OK(tqDefDict) def _checkMatchDefinition(self, tqMatchDict): """ Check a task queue match dict is valid """ def travelAndCheckType(value, validTypes, escapeValues=True): if isinstance(value, (list, tuple)): for subValue in value: if not isinstance(subValue, validTypes): return S_ERROR("List contained type %s is not valid -> %s" % (type(subValue), validTypes)) if escapeValues: return self._escapeValues(value) return S_OK(value) else: if not isinstance(value, validTypes): return S_ERROR("Type %s is not valid -> %s" % (type(value), validTypes)) if escapeValues: return self._escapeString(value) return S_OK(value) for field in singleValueDefFields: if field not in tqMatchDict: if field in mandatoryMatchFields: return S_ERROR("Missing mandatory field '%s' in match request definition" % field) continue fieldValue = tqMatchDict[field] if field in ["CPUTime"]: result = travelAndCheckType(fieldValue, six.integer_types, escapeValues=False) else: result = 
travelAndCheckType(fieldValue, six.string_types) if not result['OK']: return S_ERROR("Match definition field %s failed : %s" % (field, result['Message'])) tqMatchDict[field] = result['Value'] # Check multivalue for multiField in multiValueMatchFields: for field in (multiField, "Banned%s" % multiField, "Required%s" % multiField): if field in tqMatchDict: fieldValue = tqMatchDict[field] result = travelAndCheckType(fieldValue, six.string_types) if not result['OK']: return S_ERROR("Match definition field %s failed : %s" % (field, result['Message'])) tqMatchDict[field] = result['Value'] return S_OK(tqMatchDict) def __createTaskQueue(self, tqDefDict, priority=1, connObj=False): """ Create a task queue :returns: S_OK( tqId ) / S_ERROR """ if not connObj: result = self._getConnection() if not result['OK']: return S_ERROR("Can't create task queue: %s" % result['Message']) connObj = result['Value'] tqDefDict['CPUTime'] = self.fitCPUTimeToSegments(tqDefDict['CPUTime']) sqlSingleFields = ['TQId', 'Priority'] sqlValues = ["0", str(priority)] for field in singleValueDefFields: sqlSingleFields.append(field) sqlValues.append(tqDefDict[field]) # Insert the TQ Disabled sqlSingleFields.append("Enabled") sqlValues.append("0") cmd = "INSERT INTO tq_TaskQueues ( %s ) VALUES ( %s )" % ( ", ".join(sqlSingleFields), ", ".join([str(v) for v in sqlValues])) result = self._update(cmd, conn=connObj) if not result['OK']: self.log.error("Can't insert TQ in DB", result['Value']) return result if 'lastRowId' in result: tqId = result['lastRowId'] else: result = self._query("SELECT LAST_INSERT_ID()", conn=connObj) if not result['OK']: self.cleanOrphanedTaskQueues(connObj=connObj) return S_ERROR("Can't determine task queue id after insertion") tqId = result['Value'][0][0] for field in multiValueDefFields: if field not in tqDefDict: continue values = List.uniqueElements([value for value in tqDefDict[field] if value.strip()]) if not values: continue cmd = "INSERT INTO `tq_TQTo%s` ( TQId, Value ) VALUES " % field cmd += ", ".join(["( %s, %s )" % (tqId, str(value)) for value in values]) result = self._update(cmd, conn=connObj) if not result['OK']: self.log.error("Failed to insert condition", "%s : %s" % field, result['Message']) self.cleanOrphanedTaskQueues(connObj=connObj) return S_ERROR("Can't insert values %s for field %s: %s" % (str(values), field, result['Message'])) self.log.info("Created TQ", tqId) return S_OK(tqId) def cleanOrphanedTaskQueues(self, connObj=False): """ Delete all empty task queues """ self.log.info("Cleaning orphaned TQs") sq = "SELECT TQId FROM `tq_TaskQueues` WHERE Enabled >= 1 AND TQId not in ( SELECT DISTINCT TQId from `tq_Jobs` )" result = self._query(sq, conn=connObj) if not result['OK']: return result orphanedTQs = result['Value'] if not orphanedTQs: return S_OK() orphanedTQs = [str(otq[0]) for otq in orphanedTQs] for mvField in multiValueDefFields: result = self._update( "DELETE FROM `tq_TQTo%s` WHERE TQId in ( %s )" % (mvField, ','.join(orphanedTQs)), conn=connObj) if not result['OK']: return result result = self._update( "DELETE FROM `tq_TaskQueues` WHERE TQId in ( %s )" % ','.join(orphanedTQs), conn=connObj) if not result['OK']: return result return S_OK() def __setTaskQueueEnabled(self, tqId, enabled=True, connObj=False): if enabled: enabled = "+ 1" else: enabled = "- 1" upSQL = "UPDATE `tq_TaskQueues` SET Enabled = Enabled %s WHERE TQId=%d" % (enabled, tqId) result = self._update(upSQL, conn=connObj) if not result['OK']: self.log.error("Error setting TQ state", "TQ %s State %s: %s" % (tqId, 
enabled, result['Message'])) return result updated = result['Value'] > 0 if updated: self.log.verbose("Set enabled for TQ", "(%s for TQ %s)" % (enabled, tqId)) return S_OK(updated) def __hackJobPriority(self, jobPriority): jobPriority = min(max(int(jobPriority), self.__jobPriorityBoundaries[0]), self.__jobPriorityBoundaries[1]) if jobPriority == self.__jobPriorityBoundaries[0]: return 10 ** (-5) if jobPriority == self.__jobPriorityBoundaries[1]: return 10 ** 6 return jobPriority def insertJob(self, jobId, tqDefDict, jobPriority, skipTQDefCheck=False): """ Insert a job in a task queue (creating one if it doesn't exit) :param int jobId: job ID :param dict tqDefDict: dict for TQ definition :param int jobPriority: integer that defines the job priority :returns: S_OK() / S_ERROR """ try: int(jobId) except ValueError: return S_ERROR("JobId is not a number!") retVal = self._getConnection() if not retVal['OK']: return S_ERROR("Can't insert job: %s" % retVal['Message']) connObj = retVal['Value'] if not skipTQDefCheck: tqDefDict = dict(tqDefDict) retVal = self._checkTaskQueueDefinition(tqDefDict) if not retVal['OK']: self.log.error("TQ definition check failed", retVal['Message']) return retVal tqDefDict = retVal['Value'] tqDefDict['CPUTime'] = self.fitCPUTimeToSegments(tqDefDict['CPUTime']) self.log.info("Inserting job with requirements", "(%s : %s)" % (jobId, printDict(tqDefDict))) retVal = self.__findAndDisableTaskQueue(tqDefDict, skipDefinitionCheck=True, connObj=connObj) if not retVal['OK']: return retVal tqInfo = retVal['Value'] newTQ = False if not tqInfo['found']: self.log.info("Creating a TQ for job", jobId) retVal = self.__createTaskQueue(tqDefDict, 1, connObj=connObj) if not retVal['OK']: return retVal tqId = retVal['Value'] newTQ = True else: tqId = tqInfo['tqId'] self.log.info("Found TQ for job requirements", "(%s : %s)" % (tqId, jobId)) try: result = self.__insertJobInTaskQueue(jobId, tqId, int(jobPriority), checkTQExists=False, connObj=connObj) if not result['OK']: self.log.error("Error inserting job in TQ", "Job %s TQ %s: %s" % (jobId, tqId, result['Message'])) return result if newTQ: self.recalculateTQSharesForEntity(tqDefDict['OwnerDN'], tqDefDict['OwnerGroup'], connObj=connObj) finally: self.__setTaskQueueEnabled(tqId, True) return S_OK() def __insertJobInTaskQueue(self, jobId, tqId, jobPriority, checkTQExists=True, connObj=False): """ Insert a job in a given task queue :param int jobId: job ID :param dict tqDefDict: dict for TQ definition :param int jobPriority: integer that defines the job priority :returns: S_OK() / S_ERROR """ self.log.info("Inserting job in TQ with priority", "(%s : %s : %s)" % (jobId, tqId, jobPriority)) if not connObj: result = self._getConnection() if not result['OK']: return S_ERROR("Can't insert job: %s" % result['Message']) connObj = result['Value'] if checkTQExists: result = self._query("SELECT tqId FROM `tq_TaskQueues` WHERE TQId = %s" % tqId, conn=connObj) if not result['OK'] or not result['Value']: return S_OK("Can't find task queue with id %s: %s" % (tqId, result['Message'])) hackedPriority = self.__hackJobPriority(jobPriority) result = self._update("INSERT INTO tq_Jobs ( TQId, JobId, Priority, RealPriority ) \ VALUES ( %s, %s, %s, %f ) ON DUPLICATE KEY UPDATE TQId = %s, \ Priority = %s, RealPriority = %f" % (tqId, jobId, jobPriority, hackedPriority, tqId, jobPriority, hackedPriority), conn=connObj) if not result['OK']: return result return S_OK() def __generateTQFindSQL(self, tqDefDict, skipDefinitionCheck=False): """ Generate the SQL to find a 
task queue that has exactly the given requirements :param dict tqDefDict: dict for TQ definition :returns: S_OK() / S_ERROR """ if not skipDefinitionCheck: tqDefDict = dict(tqDefDict) result = self._checkTaskQueueDefinition(tqDefDict) if not result['OK']: return result tqDefDict = result['Value'] sqlCondList = [] for field in singleValueDefFields: sqlCondList.append("`tq_TaskQueues`.%s = %s" % (field, tqDefDict[field])) # MAGIC SUBQUERIES TO ENSURE STRICT MATCH for field in multiValueDefFields: tableName = '`tq_TQTo%s`' % field if field in tqDefDict and tqDefDict[field]: firstQuery = "SELECT COUNT(%s.Value) \ FROM %s \ WHERE %s.TQId = `tq_TaskQueues`.TQId" % (tableName, tableName, tableName) grouping = "GROUP BY %s.TQId" % tableName valuesList = List.uniqueElements([value.strip() for value in tqDefDict[field] if value.strip()]) numValues = len(valuesList) secondQuery = "%s AND %s.Value in (%s)" % (firstQuery, tableName, ",".join(["%s" % str(value) for value in valuesList])) sqlCondList.append("%s = (%s %s)" % (numValues, firstQuery, grouping)) sqlCondList.append("%s = (%s %s)" % (numValues, secondQuery, grouping)) else: sqlCondList.append("`tq_TaskQueues`.TQId not in ( SELECT DISTINCT %s.TQId from %s )" % (tableName, tableName)) # END MAGIC: That was easy ;) return S_OK(" AND ".join(sqlCondList)) def __findAndDisableTaskQueue(self, tqDefDict, skipDefinitionCheck=False, retries=10, connObj=False): """ Disable and find TQ :param dict tqDefDict: dict for TQ definition :returns: S_OK() / S_ERROR """ for _ in range(retries): result = self.__findSmallestTaskQueue(tqDefDict, skipDefinitionCheck=skipDefinitionCheck, connObj=connObj) if not result['OK']: return result data = result['Value'] if not data['found']: return result if data['enabled'] < 1: self.log.debug("TaskQueue {tqId} seems to be already disabled ({enabled})".format(**data)) result = self.__setTaskQueueEnabled(data['tqId'], False) if result['OK']: return S_OK(data) return S_ERROR("Could not disable TQ") def __findSmallestTaskQueue(self, tqDefDict, skipDefinitionCheck=False, connObj=False): """ Find a task queue that has at least the given requirements :param dict tqDefDict: dict for TQ definition :returns: S_OK() / S_ERROR """ result = self.__generateTQFindSQL(tqDefDict, skipDefinitionCheck=skipDefinitionCheck) if not result['OK']: return result sqlCmd = "SELECT COUNT( `tq_Jobs`.JobID ), `tq_TaskQueues`.TQId, `tq_TaskQueues`.Enabled \ FROM `tq_TaskQueues`, `tq_Jobs`" sqlCmd = "%s WHERE `tq_TaskQueues`.TQId = `tq_Jobs`.TQId AND %s GROUP BY `tq_Jobs`.TQId \ ORDER BY COUNT( `tq_Jobs`.JobID ) ASC" % (sqlCmd, result['Value']) result = self._query(sqlCmd, conn=connObj) if not result['OK']: self.log.error("Can't find task queue", result['Message']) return result data = result['Value'] if not data or data[0][0] >= self.__maxJobsInTQ: return S_OK({'found': False}) return S_OK({'found': True, 'tqId': data[0][1], 'enabled': data[0][2], 'jobs': data[0][0]}) def matchAndGetJob(self, tqMatchDict, numJobsPerTry=50, numQueuesPerTry=10, negativeCond=None): """ Match a job based on requirements :param dict tqDefDict: dict for TQ definition :returns: S_OK() / S_ERROR """ if negativeCond is None: negativeCond = {} # Make a copy to avoid modification of original if escaping needs to be done tqMatchDict = dict(tqMatchDict) retVal = self._checkMatchDefinition(tqMatchDict) if not retVal['OK']: self.log.error("TQ match request check failed", retVal['Message']) return retVal retVal = self._getConnection() if not retVal['OK']: return S_ERROR("Can't connect to 
DB: %s" % retVal['Message']) connObj = retVal['Value'] preJobSQL = "SELECT `tq_Jobs`.JobId, `tq_Jobs`.TQId \ FROM `tq_Jobs` WHERE `tq_Jobs`.TQId = %s AND `tq_Jobs`.Priority = %s" prioSQL = "SELECT `tq_Jobs`.Priority FROM `tq_Jobs` \ WHERE `tq_Jobs`.TQId = %s ORDER BY RAND() / `tq_Jobs`.RealPriority ASC LIMIT 1" postJobSQL = " ORDER BY `tq_Jobs`.JobId ASC LIMIT %s" % numJobsPerTry for _ in range(self.__maxMatchRetry): noJobsFound = False if 'JobID' in tqMatchDict: # A certain JobID is required by the resource, so all TQ are to be considered retVal = self.matchAndGetTaskQueue(tqMatchDict, numQueuesToGet=0, skipMatchDictDef=True, connObj=connObj) preJobSQL = "%s AND `tq_Jobs`.JobId = %s " % (preJobSQL, tqMatchDict['JobID']) else: retVal = self.matchAndGetTaskQueue(tqMatchDict, numQueuesToGet=numQueuesPerTry, skipMatchDictDef=True, negativeCond=negativeCond, connObj=connObj) if not retVal['OK']: return retVal tqList = retVal['Value'] if not tqList: self.log.info("No TQ matches requirements") return S_OK({'matchFound': False, 'tqMatch': tqMatchDict}) for tqId, tqOwnerDN, tqOwnerGroup in tqList: self.log.verbose("Trying to extract jobs from TQ", tqId) retVal = self._query(prioSQL % tqId, conn=connObj) if not retVal['OK']: return S_ERROR("Can't retrieve winning priority for matching job: %s" % retVal['Message']) if not retVal['Value']: noJobsFound = True continue prio = retVal['Value'][0][0] retVal = self._query("%s %s" % (preJobSQL % (tqId, prio), postJobSQL), conn=connObj) if not retVal['OK']: return S_ERROR("Can't begin transaction for matching job: %s" % retVal['Message']) jobTQList = [(row[0], row[1]) for row in retVal['Value']] if not jobTQList: self.log.info("Task queue seems to be empty, triggering a cleaning of", tqId) self.__deleteTQWithDelay.add(tqId, 300, (tqId, tqOwnerDN, tqOwnerGroup)) while jobTQList: jobId, tqId = jobTQList.pop(random.randint(0, len(jobTQList) - 1)) self.log.verbose("Trying to extract job from TQ", "%s : %s" % (jobId, tqId)) retVal = self.deleteJob(jobId, connObj=connObj) if not retVal['OK']: msgFix = "Could not take job" msgVar = " %s out from the TQ %s: %s" % (jobId, tqId, retVal['Message']) self.log.error(msgFix, msgVar) return S_ERROR(msgFix + msgVar) if retVal['Value']: self.log.info("Extracted job with prio from TQ", "(%s : %s : %s)" % (jobId, prio, tqId)) return S_OK({'matchFound': True, 'jobId': jobId, 'taskQueueId': tqId, 'tqMatch': tqMatchDict}) self.log.info("No jobs could be extracted from TQ", tqId) if noJobsFound: return S_OK({'matchFound': False, 'tqMatch': tqMatchDict}) self.log.info("Could not find a match after %s match retries" % self.__maxMatchRetry) return S_ERROR("Could not find a match after %s match retries" % self.__maxMatchRetry) def matchAndGetTaskQueue(self, tqMatchDict, numQueuesToGet=1, skipMatchDictDef=False, negativeCond=None, connObj=False): """ Get a queue that matches the requirements """ if negativeCond is None: negativeCond = {} # Make a copy to avoid modification of original if escaping needs to be done tqMatchDict = dict(tqMatchDict) if not skipMatchDictDef: retVal = self._checkMatchDefinition(tqMatchDict) if not retVal['OK']: return retVal retVal = self.__generateTQMatchSQL(tqMatchDict, numQueuesToGet=numQueuesToGet, negativeCond=negativeCond) if not retVal['OK']: return retVal matchSQL = retVal['Value'] retVal = self._query(matchSQL, conn=connObj) if not retVal['OK']: return retVal return S_OK([(row[0], row[1], row[2]) for row in retVal['Value']]) @staticmethod def __generateSQLSubCond(sqlString, value, boolOp='OR'): if not 
isinstance(value, (list, tuple)): return sqlString % str(value).strip() sqlORList = [] for v in value: sqlORList.append(sqlString % str(v).strip()) return "( %s )" % (" %s " % boolOp).join(sqlORList) def __generateNotSQL(self, negativeCond): """ Generate negative conditions Can be a list of dicts or a dict: - list of dicts will be OR of conditional dicts - dicts will be normal conditional dict ( kay1 in ( v1, v2, ... ) AND key2 in ( v3, v4, ... ) ) """ if isinstance(negativeCond, (list, tuple)): sqlCond = [] for cD in negativeCond: sqlCond.append(self.__generateNotDictSQL(cD)) return " ( %s )" % " OR ".join(sqlCond) elif isinstance(negativeCond, dict): return self.__generateNotDictSQL(negativeCond) raise RuntimeError("negativeCond has to be either a list or a dict or a tuple, and it's %s" % type(negativeCond)) def __generateNotDictSQL(self, negativeCond): """ Generate the negative sql condition from a standard condition dict not ( cond1 and cond2 ) = ( not cond1 or not cond 2 ) For instance: { 'Site': 'S1', 'JobType': [ 'T1', 'T2' ] } ( not 'S1' in Sites or ( not 'T1' in JobType and not 'T2' in JobType ) ) S2 T1 -> not False or ( not True and not False ) -> True or ... -> True -> Eligible S1 T3 -> not True or ( not False and not False ) -> False or (True and True ) -> True -> Eligible S1 T1 -> not True or ( not True and not False ) -> False or ( False and True ) -> False -> Nop """ condList = [] for field in negativeCond: if field in multiValueMatchFields: fullTableN = '`tq_TQTo%ss`' % field valList = negativeCond[field] if not isinstance(valList, (list, tuple)): valList = (valList, ) subList = [] for value in valList: value = self._escapeString(value)['Value'] sql = "%s NOT IN ( SELECT %s.Value FROM %s WHERE %s.TQId = tq.TQId )" % (value, fullTableN, fullTableN, fullTableN) subList.append(sql) condList.append("( %s )" % " AND ".join(subList)) elif field in singleValueDefFields: for value in negativeCond[field]: value = self._escapeString(value)['Value'] sql = "%s != tq.%s " % (value, field) condList.append(sql) return "( %s )" % " OR ".join(condList) @staticmethod def __generateTablesName(sqlTables, field): fullTableName = 'tq_TQTo%ss' % field if fullTableName not in sqlTables: tableN = field.lower() sqlTables[fullTableName] = tableN return tableN, "`%s`" % fullTableName, return sqlTables[fullTableName], "`%s`" % fullTableName def __generateTQMatchSQL(self, tqMatchDict, numQueuesToGet=1, negativeCond=None): """ Generate the SQL needed to match a task queue """ self.log.debug(tqMatchDict) if negativeCond is None: negativeCond = {} # Only enabled TQs sqlCondList = [] sqlTables = {"tq_TaskQueues": "tq"} # If OwnerDN and OwnerGroup are defined only use those combinations that make sense if 'OwnerDN' in tqMatchDict and 'OwnerGroup' in tqMatchDict: groups = tqMatchDict['OwnerGroup'] if not isinstance(groups, (list, tuple)): groups = [groups] dns = tqMatchDict['OwnerDN'] if not isinstance(dns, (list, tuple)): dns = [dns] ownerConds = [] for group in groups: if Properties.JOB_SHARING in Registry.getPropertiesForGroup(group.replace('"', "")): ownerConds.append("tq.OwnerGroup = %s" % group) else: for dn in dns: ownerConds.append("( tq.OwnerDN = %s AND tq.OwnerGroup = %s )" % (dn, group)) sqlCondList.append(" OR ".join(ownerConds)) else: # If not both are defined, just add the ones that are defined for field in ('OwnerGroup', 'OwnerDN'): if field in tqMatchDict: sqlCondList.append(self.__generateSQLSubCond("tq.%s = %%s" % field, tqMatchDict[field])) # Type of single value conditions for field in 
('CPUTime', 'Setup'): if field in tqMatchDict: if field == 'CPUTime': sqlCondList.append(self.__generateSQLSubCond("tq.%s <= %%s" % field, tqMatchDict[field])) else: sqlCondList.append(self.__generateSQLSubCond("tq.%s = %%s" % field, tqMatchDict[field])) tag_fv = [] # Match multi value fields for field in multiValueMatchFields: self.log.debug("Evaluating field %s" % field) # It has to be %ss , with an 's' at the end because the columns names # are plural and match options are singular # Just treating the (not so) special case of no Tag, No RequiredTag if 'Tag' not in tqMatchDict and 'RequiredTag' not in tqMatchDict: tqMatchDict['Tag'] = [] if field in tqMatchDict: self.log.debug("Evaluating %s with value %s" % (field, tqMatchDict[field])) _, fullTableN = self.__generateTablesName(sqlTables, field) sqlMultiCondList = [] csql = None # Now evaluating Tags if field == 'Tag': tag_fv = tqMatchDict.get('Tag') self.log.debug("Evaluating tag %s of type %s" % (tag_fv, type(tag_fv))) if isinstance(tag_fv, str): tag_fv = [tag_fv] # Is there something to consider? if any(_lowerAndRemovePunctuation(fvx) == 'any' for fvx in tag_fv): continue else: sqlMultiCondList.append(self.__generateTagSQLSubCond(fullTableN, tag_fv)) # Now evaluating everything that is not tags else: fv = tqMatchDict.get(field) self.log.debug("Evaluating field %s of type %s" % (field, type(fv))) # Is there something to consider? if not fv: continue if isinstance(fv, str) and _lowerAndRemovePunctuation(fv) == 'any': continue if isinstance(fv, list) and any(_lowerAndRemovePunctuation(fvx) == 'any' for fvx in fv): continue # if field != 'GridCE' or 'Site' in tqMatchDict: # Jobs for masked sites can be matched if they specified a GridCE # Site is removed from tqMatchDict if the Site is mask. In this case we want # that the GridCE matches explicitly so the COUNT can not be 0. In this case we skip this # condition sqlMultiCondList.append("( SELECT COUNT(%s.Value) FROM %s WHERE %s.TQId = tq.TQId ) = 0" % (fullTableN, fullTableN, fullTableN)) sqlMultiCondList.append(self.__generateSQLSubCond("%%s IN ( SELECT %s.Value \ FROM %s \ WHERE %s.TQId = tq.TQId )" % (fullTableN, fullTableN, fullTableN), tqMatchDict.get(field))) sqlCondList.append("( %s )" % " OR ".join(sqlMultiCondList)) # In case of Site, check it's not in job banned sites if field in bannedJobMatchFields: fullTableN = '`tq_TQToBanned%ss`' % field csql = self.__generateSQLSubCond("%%s not in ( SELECT %s.Value \ FROM %s \ WHERE %s.TQId = tq.TQId )" % (fullTableN, fullTableN, fullTableN), tqMatchDict[field], boolOp='OR') sqlCondList.append(csql) # Add possibly RequiredTag conditions rtag_fv = tqMatchDict.get('RequiredTag', []) if isinstance(rtag_fv, str): rtag_fv = [rtag_fv] # Is there something to consider? if not rtag_fv or any(_lowerAndRemovePunctuation(fv) == 'any' for fv in rtag_fv): pass elif not set(rtag_fv).issubset(set(tag_fv)): return S_ERROR('Wrong conditions') else: self.log.debug("Evaluating RequiredTag %s" % rtag_fv) sqlCondList.append(self.__generateRequiredTagSQLSubCond('`tq_TQToTags`', rtag_fv)) # Add possibly Resource banning conditions for field in multiValueMatchFields: bannedField = "Banned%s" % field # Is there something to consider? 
b_fv = tqMatchDict.get(bannedField) if not b_fv \ or isinstance(b_fv, str) and _lowerAndRemovePunctuation(b_fv) == 'any' \ or isinstance(b_fv, list) \ and any(_lowerAndRemovePunctuation(fvx) == 'any' for fvx in b_fv): continue fullTableN = '`tq_TQTo%ss`' % field sqlCondList.append(self.__generateSQLSubCond("%%s not in ( SELECT %s.Value \ FROM %s \ WHERE %s.TQId = tq.TQId )" % (fullTableN, fullTableN, fullTableN), b_fv, boolOp='OR')) # Add extra negative conditions if negativeCond: sqlCondList.append(self.__generateNotSQL(negativeCond)) # Generate the final query string tqSqlCmd = "SELECT tq.TQId, tq.OwnerDN, tq.OwnerGroup FROM `tq_TaskQueues` tq WHERE %s" % ( " AND ".join(sqlCondList)) # Apply priorities tqSqlCmd = "%s ORDER BY RAND() / tq.Priority ASC" % tqSqlCmd # Do we want a limit? if numQueuesToGet: tqSqlCmd = "%s LIMIT %s" % (tqSqlCmd, numQueuesToGet) return S_OK(tqSqlCmd) @staticmethod def __generateTagSQLSubCond(tableName, tagMatchList): """ Generate SQL condition where ALL the specified multiValue requirements must be present in the matching resource list """ sql1 = "SELECT COUNT(%s.Value) FROM %s WHERE %s.TQId=tq.TQId" % (tableName, tableName, tableName) if not tagMatchList: sql2 = sql1 + " AND %s.Value=''" % tableName else: if isinstance(tagMatchList, (list, tuple)): sql2 = sql1 + " AND %s.Value in ( %s )" % (tableName, ','.join(["%s" % v for v in tagMatchList])) else: sql2 = sql1 + " AND %s.Value=%s" % (tableName, tagMatchList) sql = '( ' + sql1 + ' ) = (' + sql2 + ' )' return sql @staticmethod def __generateRequiredTagSQLSubCond(tableName, tagMatchList): """ Generate SQL condition where the TQ corresponds to the requirements of the resource """ sql = "SELECT COUNT(%s.Value) FROM %s WHERE %s.TQId=tq.TQId" % (tableName, tableName, tableName) if isinstance(tagMatchList, (list, tuple)): sql = sql + " AND %s.Value in ( %s )" % (tableName, ','.join(["%s" % v for v in tagMatchList])) nTags = len(tagMatchList) else: sql = sql + " AND %s.Value=%s" % (tableName, tagMatchList) nTags = 1 sql = '( %s ) = %s' % (sql, nTags) return sql def deleteJob(self, jobId, connObj=False): """ Delete a job from the task queues Return S_OK( True/False ) / S_ERROR """ if not connObj: retVal = self._getConnection() if not retVal['OK']: return S_ERROR("Can't delete job: %s" % retVal['Message']) connObj = retVal['Value'] retVal = self._query( "SELECT t.TQId, t.OwnerDN, t.OwnerGroup \ FROM `tq_TaskQueues` t, `tq_Jobs` j \ WHERE j.JobId = %s AND t.TQId = j.TQId" % jobId, conn=connObj) if not retVal['OK']: return S_ERROR("Could not get job from task queue %s: %s" % (jobId, retVal['Message'])) data = retVal['Value'] if not data: return S_OK(False) tqId, tqOwnerDN, tqOwnerGroup = data[0] self.log.verbose("Deleting job", jobId) retVal = self._update("DELETE FROM `tq_Jobs` WHERE JobId = %s" % jobId, conn=connObj) if not retVal['OK']: return S_ERROR("Could not delete job from task queue %s: %s" % (jobId, retVal['Message'])) if retVal['Value'] == 0: # No job deleted return S_OK(False) # Always return S_OK() because job has already been taken out from the TQ self.__deleteTQWithDelay.add(tqId, 300, (tqId, tqOwnerDN, tqOwnerGroup)) return S_OK(True) def getTaskQueueForJob(self, jobId, connObj=False): """ Return TaskQueue for a given Job Return S_OK( [TaskQueueID] ) / S_ERROR """ if not connObj: retVal = self._getConnection() if not retVal['OK']: return S_ERROR("Can't get TQ for job: %s" % retVal['Message']) connObj = retVal['Value'] retVal = self._query('SELECT TQId FROM `tq_Jobs` WHERE JobId = %s ' % jobId, 
conn=connObj) if not retVal['OK']: return retVal if not retVal['Value']: return S_ERROR('Not in TaskQueues') return S_OK(retVal['Value'][0][0]) def getTaskQueueForJobs(self, jobIDs, connObj=False): """ Return TaskQueues for a given list of Jobs """ if not connObj: retVal = self._getConnection() if not retVal['OK']: self.log.error("Can't get TQs for a job list", retVal['Message']) return retVal connObj = retVal['Value'] cmd = 'SELECT JobId,TQId FROM `tq_Jobs` WHERE JobId IN (%s) ' % ','.join(str(x) for x in jobIDs) retVal = self._query(cmd, conn=connObj) if not retVal['OK']: return retVal if not retVal['Value']: return S_ERROR('Not in TaskQueues') resultDict = {} for jobID, tqID in retVal['Value']: resultDict[int(jobID)] = int(tqID) return S_OK(resultDict) def __getOwnerForTaskQueue(self, tqId, connObj=False): retVal = self._query("SELECT OwnerDN, OwnerGroup from `tq_TaskQueues` WHERE TQId=%s" % tqId, conn=connObj) if not retVal['OK']: return retVal data = retVal['Value'] if not data: return S_OK(False) return S_OK(retVal['Value'][0]) def __deleteTQIfEmpty(self, args): (tqId, tqOwnerDN, tqOwnerGroup) = args retries = 3 while retries: retries -= 1 result = self.deleteTaskQueueIfEmpty(tqId, tqOwnerDN, tqOwnerGroup) if result['OK']: return self.log.error("Could not delete TQ", "%s: %s" % (tqId, result['Message'])) def deleteTaskQueueIfEmpty(self, tqId, tqOwnerDN=False, tqOwnerGroup=False, connObj=False): """ Try to delete a task queue if its empty """ if not connObj: retVal = self._getConnection() if not retVal['OK']: self.log.error("Can't insert job", retVal['Message']) return retVal connObj = retVal['Value'] if not tqOwnerDN or not tqOwnerGroup: retVal = self.__getOwnerForTaskQueue(tqId, connObj=connObj) if not retVal['OK']: return retVal data = retVal['Value'] if not data: return S_OK(False) tqOwnerDN, tqOwnerGroup = data sqlCmd = "SELECT TQId FROM `tq_TaskQueues` WHERE Enabled >= 1 AND `tq_TaskQueues`.TQId = %s " % tqId sqlCmd += "AND `tq_TaskQueues`.TQId not in ( SELECT DISTINCT TQId from `tq_Jobs` )" retVal = self._query(sqlCmd, conn=connObj) if not retVal['OK']: self.log.error("Could not select task queue", "%s : %s" % tqId, retVal['Message']) return retVal tqToDel = retVal['Value'] if tqToDel: for mvField in multiValueDefFields: retVal = self._update("DELETE FROM `tq_TQTo%s` WHERE TQId = %s" % (mvField, tqId), conn=connObj) if not retVal['OK']: return retVal retVal = self._update("DELETE FROM `tq_TaskQueues` WHERE TQId = %s" % tqId, conn=connObj) if not retVal['OK']: return retVal self.recalculateTQSharesForEntity(tqOwnerDN, tqOwnerGroup, connObj=connObj) self.log.info("Deleted empty and enabled TQ", tqId) return S_OK() return S_OK(False) def deleteTaskQueue(self, tqId, tqOwnerDN=False, tqOwnerGroup=False, connObj=False): """ Try to delete a task queue even if it has jobs """ self.log.info("Deleting TQ", tqId) if not connObj: retVal = self._getConnection() if not retVal['OK']: return S_ERROR("Can't insert job: %s" % retVal['Message']) connObj = retVal['Value'] if not tqOwnerDN or not tqOwnerGroup: retVal = self.__getOwnerForTaskQueue(tqId, connObj=connObj) if not retVal['OK']: return retVal data = retVal['Value'] if not data: return S_OK(False) tqOwnerDN, tqOwnerGroup = data sqlCmd = "DELETE FROM `tq_TaskQueues` WHERE `tq_TaskQueues`.TQId = %s" % tqId retVal = self._update(sqlCmd, conn=connObj) if not retVal['OK']: return S_ERROR("Could not delete task queue %s: %s" % (tqId, retVal['Message'])) delTQ = retVal['Value'] sqlCmd = "DELETE FROM `tq_Jobs` WHERE `tq_Jobs`.TQId = %s" % tqId 
retVal = self._update(sqlCmd, conn=connObj) if not retVal['OK']: return S_ERROR("Could not delete task queue %s: %s" % (tqId, retVal['Message'])) for field in multiValueDefFields: retVal = self._update("DELETE FROM `tq_TQTo%s` WHERE TQId = %s" % (field, tqId), conn=connObj) if not retVal['OK']: return retVal if delTQ > 0: self.recalculateTQSharesForEntity(tqOwnerDN, tqOwnerGroup, connObj=connObj) return S_OK(True) return S_OK(False) def getMatchingTaskQueues(self, tqMatchDict, negativeCond=False): """ Get the info of the task queues that match a resource """ result = self.matchAndGetTaskQueue(tqMatchDict, numQueuesToGet=0, negativeCond=negativeCond) if not result['OK']: return result return self.retrieveTaskQueues([tqTuple[0] for tqTuple in result['Value']]) def getNumTaskQueues(self): """ Get the number of task queues in the system """ sqlCmd = "SELECT COUNT( TQId ) FROM `tq_TaskQueues`" retVal = self._query(sqlCmd) if not retVal['OK']: return retVal return S_OK(retVal['Value'][0][0]) def retrieveTaskQueues(self, tqIdList=None): """ Get all the task queues """ sqlSelectEntries = ["`tq_TaskQueues`.TQId", "`tq_TaskQueues`.Priority", "COUNT( `tq_Jobs`.TQId )"] sqlGroupEntries = ["`tq_TaskQueues`.TQId", "`tq_TaskQueues`.Priority"] for field in singleValueDefFields: sqlSelectEntries.append("`tq_TaskQueues`.%s" % field) sqlGroupEntries.append("`tq_TaskQueues`.%s" % field) sqlCmd = "SELECT %s FROM `tq_TaskQueues`, `tq_Jobs`" % ", ".join(sqlSelectEntries) sqlTQCond = "" if tqIdList is not None: if not tqIdList: # Empty list => Fast-track no matches return S_OK({}) else: sqlTQCond += " AND `tq_TaskQueues`.TQId in ( %s )" % ", ".join([str(id_) for id_ in tqIdList]) sqlCmd = "%s WHERE `tq_TaskQueues`.TQId = `tq_Jobs`.TQId %s GROUP BY %s" % (sqlCmd, sqlTQCond, ", ".join(sqlGroupEntries)) retVal = self._query(sqlCmd) if not retVal['OK']: self.log.error("Can't retrieve task queues info", retVal['Message']) return retVal tqData = {} for record in retVal['Value']: tqId = record[0] tqData[tqId] = {'Priority': record[1], 'Jobs': record[2]} record = record[3:] for iP, _ in enumerate(singleValueDefFields): tqData[tqId][singleValueDefFields[iP]] = record[iP] tqNeedCleaning = False for field in multiValueDefFields: table = "`tq_TQTo%s`" % field sqlCmd = "SELECT %s.TQId, %s.Value FROM %s" % (table, table, table) retVal = self._query(sqlCmd) if not retVal['OK']: self.log.error("Can't retrieve task queues field", "%s info: %s" % (field, retVal['Message'])) return retVal for record in retVal['Value']: tqId = record[0] value = record[1] if tqId not in tqData: if tqIdList is None or tqId in tqIdList: self.log.verbose( "Task Queue is defined for a field, but does not exist: triggering a cleaning", "TQID: %s, field: %s" % (tqId, field)) tqNeedCleaning = True else: if field not in tqData[tqId]: tqData[tqId][field] = [] tqData[tqId][field].append(value) if tqNeedCleaning: self.cleanOrphanedTaskQueues() return S_OK(tqData) def __updateGlobalShares(self): """ Update internal structure for shares """ # Update group shares self.__groupShares = self.getGroupShares() # Apply corrections if enabled if self.isSharesCorrectionEnabled(): result = self.getGroupsInTQs() if not result['OK']: self.log.error("Could not get groups in the TQs", result['Message']) activeGroups = result['Value'] newShares = {} for group in activeGroups: if group in self.__groupShares: newShares[group] = self.__groupShares[group] newShares = self.__sharesCorrector.correctShares(newShares) for group in self.__groupShares: if group in newShares: 
self.__groupShares[group] = newShares[group] def recalculateTQSharesForAll(self): """ Recalculate all priorities for TQ's """ if self.isSharesCorrectionEnabled(): self.log.info("Updating correctors state") self.__sharesCorrector.update() self.__updateGlobalShares() self.log.info("Recalculating shares for all TQs") retVal = self._getConnection() if not retVal['OK']: return S_ERROR("Can't insert job: %s" % retVal['Message']) result = self._query("SELECT DISTINCT( OwnerGroup ) FROM `tq_TaskQueues`") if not result['OK']: return result for group in [r[0] for r in result['Value']]: self.recalculateTQSharesForEntity("all", group) return S_OK() def recalculateTQSharesForEntity(self, userDN, userGroup, connObj=False): """ Recalculate the shares for a userDN/userGroup combo """ self.log.info("Recalculating shares", "for %s@%s TQs" % (userDN, userGroup)) if userGroup in self.__groupShares: share = self.__groupShares[userGroup] else: share = float(DEFAULT_GROUP_SHARE) if Properties.JOB_SHARING in Registry.getPropertiesForGroup(userGroup): # If group has JobSharing just set prio for that entry, userDN is irrelevant return self.__setPrioritiesForEntity(userDN, userGroup, share, connObj=connObj) selSQL = "SELECT OwnerDN, COUNT(OwnerDN) FROM `tq_TaskQueues` WHERE OwnerGroup='%s' GROUP BY OwnerDN" % (userGroup) result = self._query(selSQL, conn=connObj) if not result['OK']: return result # Get owners in this group and the amount of times they appear data = [(r[0], r[1]) for r in result['Value'] if r] numOwners = len(data) # If there are no owners do now if numOwners == 0: return S_OK() # Split the share amongst the number of owners share /= numOwners entitiesShares = dict([(row[0], share) for row in data]) # If corrector is enabled let it work it's magic if self.isSharesCorrectionEnabled(): entitiesShares = self.__sharesCorrector.correctShares(entitiesShares, group=userGroup) # Keep updating owners = dict(data) # IF the user is already known and has more than 1 tq, the rest of the users don't need to be modified # (The number of owners didn't change) if userDN in owners and owners[userDN] > 1: return self.__setPrioritiesForEntity(userDN, userGroup, entitiesShares[userDN], connObj=connObj) # Oops the number of owners may have changed so we recalculate the prio for all owners in the group for userDN in owners: self.__setPrioritiesForEntity(userDN, userGroup, entitiesShares[userDN], connObj=connObj) return S_OK() def __setPrioritiesForEntity(self, userDN, userGroup, share, connObj=False, consolidationFunc="AVG"): """ Set the priority for a userDN/userGroup combo given a splitted share """ self.log.info("Setting priorities", "to %s@%s TQs" % (userDN, userGroup)) tqCond = ["t.OwnerGroup='%s'" % userGroup] allowBgTQs = gConfig.getValue("/Registry/Groups/%s/AllowBackgroundTQs" % userGroup, False) if Properties.JOB_SHARING not in Registry.getPropertiesForGroup(userGroup): res = self._escapeString(userDN) if not res['OK']: return res userDN = res['Value'] tqCond.append("t.OwnerDN= %s " % userDN) tqCond.append("t.TQId = j.TQId") if consolidationFunc == 'AVG': selectSQL = "SELECT j.TQId, SUM( j.RealPriority )/COUNT(j.RealPriority) \ FROM `tq_TaskQueues` t, `tq_Jobs` j WHERE " elif consolidationFunc == 'SUM': selectSQL = "SELECT j.TQId, SUM( j.RealPriority ) FROM `tq_TaskQueues` t, `tq_Jobs` j WHERE " else: return S_ERROR("Unknown consolidation func %s for setting priorities" % consolidationFunc) selectSQL += " AND ".join(tqCond) selectSQL += " GROUP BY t.TQId" result = self._query(selectSQL, conn=connObj) if not 
result['OK']: return result tqDict = dict(result['Value']) if not tqDict: return S_OK() # Calculate Sum of priorities totalPrio = 0 for k in tqDict: if tqDict[k] > 0.1 or not allowBgTQs: totalPrio += tqDict[k] # Update prio for each TQ for tqId in tqDict: if tqDict[tqId] > 0.1 or not allowBgTQs: prio = (share / totalPrio) * tqDict[tqId] else: prio = TQ_MIN_SHARE prio = max(prio, TQ_MIN_SHARE) tqDict[tqId] = prio # Generate groups of TQs that will have the same prio=sum(prios) maomenos result = self.retrieveTaskQueues(list(tqDict)) if not result['OK']: return result allTQsData = result['Value'] tqGroups = {} for tqid in allTQsData: tqData = allTQsData[tqid] for field in ('Jobs', 'Priority') + priorityIgnoredFields: if field in tqData: tqData.pop(field) tqHash = [] for f in sorted(tqData): tqHash.append("%s:%s" % (f, tqData[f])) tqHash = "|".join(tqHash) if tqHash not in tqGroups: tqGroups[tqHash] = [] tqGroups[tqHash].append(tqid) tqGroups = [tqGroups[td] for td in tqGroups] # Do the grouping for tqGroup in tqGroups: totalPrio = 0 if len(tqGroup) < 2: continue for tqid in tqGroup: totalPrio += tqDict[tqid] for tqid in tqGroup: tqDict[tqid] = totalPrio # Group by priorities prioDict = {} for tqId in tqDict: prio = tqDict[tqId] if prio not in prioDict: prioDict[prio] = [] prioDict[prio].append(tqId) # Execute updates for prio in prioDict: tqList = ", ".join([str(tqId) for tqId in prioDict[prio]]) updateSQL = "UPDATE `tq_TaskQueues` SET Priority=%.4f WHERE TQId in ( %s )" % (prio, tqList) self._update(updateSQL, conn=connObj) return S_OK() @staticmethod def getGroupShares(): """ Get all the shares as a DICT """ result = gConfig.getSections("/Registry/Groups") if result['OK']: groups = result['Value'] else: groups = [] shares = {} for group in groups: shares[group] = gConfig.getValue("/Registry/Groups/%s/JobShare" % group, DEFAULT_GROUP_SHARE) return shares
yujikato/DIRAC
src/DIRAC/WorkloadManagementSystem/DB/TaskQueueDB.py
Python
gpl-3.0
54,572
[ "DIRAC" ]
910f1402c7a3bc7dce52949bf23a959a8be80d78208ff0e60a64bc841bef13b7
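The __generateNotDictSQL docstring in the TaskQueueDB code above rests on De Morgan's law: NOT (cond1 AND cond2) = (NOT cond1) OR (NOT cond2), so a task queue is excluded only when it matches every field of the negative condition at once. Below is a pure-Python sketch of that predicate for single-valued attributes; "eligible" and "tq" are illustrative names only, not DIRAC's API, and the real operator emits the equivalent SQL (including the multi-value tq_TQTo* sub-selects) rather than evaluating dicts.

# Sketch of the eligibility rule encoded by __generateNotDictSQL, single-valued fields only.
def eligible(tq, negative_cond):
    # not (cond1 and cond2 and ...) == (not cond1) or (not cond2) or ...
    # so a task queue is rejected only if it matches *every* field at once.
    for field, banned in negative_cond.items():
        if not isinstance(banned, (list, tuple)):
            banned = [banned]
        if tq.get(field) not in banned:      # this field fails to match -> still eligible
            return True
    return False

# The truth table from the docstring: negativeCond = {'Site': 'S1', 'JobType': ['T1', 'T2']}
neg = {'Site': 'S1', 'JobType': ['T1', 'T2']}
assert eligible({'Site': 'S2', 'JobType': 'T1'}, neg)       # S2 / T1 -> eligible
assert eligible({'Site': 'S1', 'JobType': 'T3'}, neg)       # S1 / T3 -> eligible
assert not eligible({'Site': 'S1', 'JobType': 'T1'}, neg)   # S1 / T1 -> excluded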
__author__ = 'Zero'

from django.utils.translation import ugettext_lazy as _
from horizon import tables
from django.utils.translation import pgettext_lazy
from django import template
from openstack_dashboard.dashboards.admin.systemlogs import trans
import logging

LOG = logging.getLogger(__name__)


class DownloadLogs(tables.LinkAction):
    name = "Logs"
    verbose_name = _("Download")
    icon = "download"

    def get_link_url(self):
        return "?format=csv"


class LogsFilterAction(tables.FilterAction):

    def filter(self, table, logs, filter_string):
        q = filter_string.lower()

        def comp(tenant):
            if q in tenant.name.lower():
                return True
            return False

        return filter(comp, logs)


def get_loginfo(db_log_data):
    template_name = 'admin/systemlogs/_loginfo.html'
    context = {
        "event_subject": db_log_data.event_subject,
        "id": db_log_data.id,
        "event_object": db_log_data.event_object,
        "user_name": db_log_data.user_name,
        "project_name": db_log_data.project_name,
        "time": db_log_data.event_time,
        "result": db_log_data.result,
        "detail": db_log_data.message
    }
    return template.loader.render_to_string(template_name, context)


class LogsTable(tables.DataTable):
    STATUS_CHOICES = (
        ("createinstance", True),
        ("delete", True),
    )
    event_subject = tables.Column('event_subject',
                                  verbose_name=_('Subject'),
                                  status=True,
                                  display_choices=trans.STATUS_DISPLAY_CHOICES)
    event_object = tables.Column(get_loginfo,
                                 verbose_name=_('Name'),
                                 attrs={'data-type': 'event_object'},)
    user_id = tables.Column('user_name', verbose_name=_('User'),)
    project_id = tables.Column('project_name', verbose_name=_('Tenant'),)
    visit_ip = tables.Column('visit_ip', verbose_name=_('Visit IP'),)
    event_time = tables.Column('event_time', verbose_name=_('Time'),)
    result = tables.Column('result', verbose_name=_('Result'), status=True,
                           display_choices=trans.RESULT_DISPLAY_CHOICES)

    class Meta:
        name = "Logs"
        verbose_name = _("System Logs")
        table_actions = (DownloadLogs, )
        multi_select = False
xuweiliang/Codelibrary
openstack_dashboard/dashboards/admin/systemlogs/tables.py
Python
apache-2.0
2,304
[ "VisIt" ]
b86e257d055a546d0bc12467de6c0a4f001266c1643e3d337b87b31cb462375b
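LogsFilterAction.filter above performs a client-side, case-insensitive substring match on the log name. A minimal stand-alone sketch of that behaviour with stand-in records follows; Log and filter_logs are hypothetical names for illustration only, not part of Horizon, and the real objects come from the systemlogs API with more attributes.

from collections import namedtuple

# Stand-in for the log records the table receives.
Log = namedtuple('Log', ['name', 'event_subject'])

def filter_logs(logs, filter_string):
    # Case-insensitive substring match on the log name, as the table filter intends.
    q = filter_string.lower()
    return [log for log in logs if q in log.name.lower()]

logs = [Log('create instance', 'createinstance'), Log('delete volume', 'delete')]
assert [l.event_subject for l in filter_logs(logs, 'DELETE')] == ['delete']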
import sys import numpy as np import pyzed.sl as sl import cv2 help_string = "[s] Save side by side image [d] Save Depth, [n] Change Depth format, [p] Save Point Cloud, [m] Change Point Cloud format, [q] Quit" prefix_point_cloud = "Cloud_" prefix_depth = "Depth_" path = "./" count_save = 0 mode_point_cloud = 0 mode_depth = 0 point_cloud_format_ext = ".ply" depth_format_ext = ".png" def point_cloud_format_name(): global mode_point_cloud if mode_point_cloud > 3: mode_point_cloud = 0 switcher = { 0: ".xyz", 1: ".pcd", 2: ".ply", 3: ".vtk", } return switcher.get(mode_point_cloud, "nothing") def depth_format_name(): global mode_depth if mode_depth > 2: mode_depth = 0 switcher = { 0: ".png", 1: ".pfm", 2: ".pgm", } return switcher.get(mode_depth, "nothing") def save_point_cloud(zed, filename) : print("Saving Point Cloud...") tmp = sl.Mat() zed.retrieve_measure(tmp, sl.MEASURE.XYZRGBA) saved = (tmp.write(filename + point_cloud_format_ext) == sl.ERROR_CODE.SUCCESS) if saved : print("Done") else : print("Failed... Please check that you have permissions to write on disk") def save_depth(zed, filename) : print("Saving Depth Map...") tmp = sl.Mat() zed.retrieve_measure(tmp, sl.MEASURE.DEPTH) saved = (tmp.write(filename + depth_format_ext) == sl.ERROR_CODE.SUCCESS) if saved : print("Done") else : print("Failed... Please check that you have permissions to write on disk") def save_sbs_image(zed, filename) : image_sl_left = sl.Mat() zed.retrieve_image(image_sl_left, sl.VIEW.LEFT) image_cv_left = image_sl_left.get_data() image_sl_right = sl.Mat() zed.retrieve_image(image_sl_right, sl.VIEW.RIGHT) image_cv_right = image_sl_right.get_data() sbs_image = np.concatenate((image_cv_left, image_cv_right), axis=1) cv2.imwrite(filename, sbs_image) def process_key_event(zed, key) : global mode_depth global mode_point_cloud global count_save global depth_format_ext global point_cloud_format_ext if key == 100 or key == 68: save_depth(zed, path + prefix_depth + str(count_save)) count_save += 1 elif key == 110 or key == 78: mode_depth += 1 depth_format_ext = depth_format_name() print("Depth format: ", depth_format_ext) elif key == 112 or key == 80: save_point_cloud(zed, path + prefix_point_cloud + str(count_save)) count_save += 1 elif key == 109 or key == 77: mode_point_cloud += 1 point_cloud_format_ext = point_cloud_format_name() print("Point Cloud format: ", point_cloud_format_ext) elif key == 104 or key == 72: print(help_string) elif key == 115: save_sbs_image(zed, "ZED_image" + str(count_save) + ".png") count_save += 1 else: a = 0 def print_help() : print(" Press 's' to save Side by side images") print(" Press 'p' to save Point Cloud") print(" Press 'd' to save Depth image") print(" Press 'm' to switch Point Cloud format") print(" Press 'n' to switch Depth format") def main() : # Create a ZED camera object zed = sl.Camera() # Set configuration parameters input_type = sl.InputType() if len(sys.argv) >= 2 : input_type.set_from_svo_file(sys.argv[1]) init = sl.InitParameters(input_t=input_type) init.camera_resolution = sl.RESOLUTION.HD1080 init.depth_mode = sl.DEPTH_MODE.PERFORMANCE init.coordinate_units = sl.UNIT.MILLIMETER # Open the camera err = zed.open(init) if err != sl.ERROR_CODE.SUCCESS : print(repr(err)) zed.close() exit(1) # Display help in console print_help() # Set runtime parameters after opening the camera runtime = sl.RuntimeParameters() runtime.sensing_mode = sl.SENSING_MODE.STANDARD # Prepare new image size to retrieve half-resolution images image_size = zed.get_camera_information().camera_resolution 
image_size.width = image_size.width /2 image_size.height = image_size.height /2 # Declare your sl.Mat matrices image_zed = sl.Mat(image_size.width, image_size.height, sl.MAT_TYPE.U8_C4) depth_image_zed = sl.Mat(image_size.width, image_size.height, sl.MAT_TYPE.U8_C4) point_cloud = sl.Mat() key = ' ' while key != 113 : err = zed.grab(runtime) if err == sl.ERROR_CODE.SUCCESS : # Retrieve the left image, depth image in the half-resolution zed.retrieve_image(image_zed, sl.VIEW.LEFT, sl.MEM.CPU, image_size) zed.retrieve_image(depth_image_zed, sl.VIEW.DEPTH, sl.MEM.CPU, image_size) # Retrieve the RGBA point cloud in half resolution zed.retrieve_measure(point_cloud, sl.MEASURE.XYZRGBA, sl.MEM.CPU, image_size) # To recover data from sl.Mat to use it with opencv, use the get_data() method # It returns a numpy array that can be used as a matrix with opencv image_ocv = image_zed.get_data() depth_image_ocv = depth_image_zed.get_data() cv2.imshow("Image", image_ocv) cv2.imshow("Depth", depth_image_ocv) key = cv2.waitKey(10) process_key_event(zed, key) cv2.destroyAllWindows() zed.close() print("\nFINISH") if __name__ == "__main__": main()
stereolabs/zed-opencv
python/zed-opencv.py
Python
mit
5,466
[ "VTK" ]
7325961b55baaabcbe4dff5236ab35eb9fbf424206bfc4d67015dfa9043c894c
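In process_key_event above, the branches on 100/68, 110/78 and so on work because cv2.waitKey() returns the integer code of the pressed key (or -1 when no key was pressed within the timeout). A small sketch of the same dispatch written with ord() instead of magic numbers; KEY_ACTIONS and describe_key are illustrative names only, not part of the ZED sample.

# Key-code dispatch sketch: map key codes to the actions the sample performs.
KEY_ACTIONS = {
    ord('d'): 'save depth',             ord('n'): 'cycle depth format',
    ord('p'): 'save point cloud',       ord('m'): 'cycle point cloud format',
    ord('s'): 'save side-by-side image',
    ord('h'): 'print help',             ord('q'): 'quit',
}

def describe_key(key):
    # Fold the upper-case codes (68, 78, ...) onto the lower-case entries.
    if 0 <= key < 256:
        key = ord(chr(key).lower())
    return KEY_ACTIONS.get(key, 'ignored')

assert describe_key(ord('D')) == 'save depth'
assert describe_key(113) == 'quit'        # 113 == ord('q'), the loop's exit code
assert describe_key(-1) == 'ignored'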
# (C) British Crown Copyright 2010 - 2015, Met Office # # This file is part of Iris. # # Iris is free software: you can redistribute it and/or modify it under # the terms of the GNU Lesser General Public License as published by the # Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Iris is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with Iris. If not, see <http://www.gnu.org/licenses/>. from __future__ import (absolute_import, division, print_function) from six.moves import (filter, input, map, range, zip) # noqa import six # import iris tests first so that some things can be initialised before importing anything else import iris.tests as tests import biggus import numpy as np import iris.analysis.trajectory import iris.tests.stock # Run tests in no graphics mode if matplotlib is not available. if tests.MPL_AVAILABLE: import matplotlib.pyplot as plt class TestSimple(tests.IrisTest): def test_invalid_coord(self): cube = iris.tests.stock.realistic_4d() sample_points = [('altitude', [0, 10, 50])] with self.assertRaises(ValueError): iris.analysis.trajectory.interpolate(cube, sample_points, 'nearest') class TestTrajectory(tests.IrisTest): def test_trajectory_definition(self): # basic 2-seg line along x waypoints = [ {'lat':0, 'lon':0}, {'lat':0, 'lon':1}, {'lat':0, 'lon':2} ] trajectory = iris.analysis.trajectory.Trajectory(waypoints, sample_count=21) self.assertEqual(trajectory.length, 2.0) self.assertEqual(trajectory.sampled_points[19], {'lat': 0.0, 'lon': 1.9000000000000001}) # 4-seg m-shape waypoints = [ {'lat':0, 'lon':0}, {'lat':1, 'lon':1}, {'lat':0, 'lon':2}, {'lat':1, 'lon':3}, {'lat':0, 'lon':4} ] trajectory = iris.analysis.trajectory.Trajectory(waypoints, sample_count=33) self.assertEqual(trajectory.length, 5.6568542494923806) self.assertEqual(trajectory.sampled_points[31], {'lat': 0.12499999999999989, 'lon': 3.875}) @tests.skip_data @tests.skip_plot def test_trajectory_extraction(self): # Load the COLPEX data => TZYX path = tests.get_data_path(['PP', 'COLPEX', 'theta_and_orog_subset.pp']) cube = iris.load_cube(path, 'air_potential_temperature') cube.coord('grid_latitude').bounds = None cube.coord('grid_longitude').bounds = None # TODO: Workaround until regrid can handle factories cube.remove_aux_factory(cube.aux_factories[0]) cube.remove_coord('surface_altitude') self.assertCML(cube, ('trajectory', 'big_cube.cml')) # Pull out a single point - no interpolation required single_point = iris.analysis.trajectory.interpolate( cube, [('grid_latitude', [-0.1188]), ('grid_longitude', [359.57958984])]) expected = cube[..., 10, 0].data self.assertArrayAllClose(single_point[..., 0].data, expected, rtol=2.0e-7) self.assertCML(single_point, ('trajectory', 'single_point.cml'), checksum=False) # Pull out another point and test against a manually calculated result. 
single_point = [['grid_latitude', [-0.1188]], ['grid_longitude', [359.584090412]]] scube = cube[0, 0, 10:11, 4:6] x0 = scube.coord('grid_longitude')[0].points x1 = scube.coord('grid_longitude')[1].points y0 = scube.data[0, 0] y1 = scube.data[0, 1] expected = y0 + ((y1 - y0) * ((359.584090412 - x0)/(x1 - x0))) trajectory_cube = iris.analysis.trajectory.interpolate(scube, single_point) self.assertArrayAllClose(trajectory_cube.data, expected, rtol=2.0e-7) # Extract a simple, axis-aligned trajectory that is similar to an indexing operation. # (It's not exactly the same because the source cube doesn't have regular spacing.) waypoints = [ {'grid_latitude': -0.1188, 'grid_longitude': 359.57958984}, {'grid_latitude': -0.1188, 'grid_longitude': 359.66870117} ] trajectory = iris.analysis.trajectory.Trajectory(waypoints, sample_count=100) def traj_to_sample_points(trajectory): sample_points = [] src_points = trajectory.sampled_points for name in six.iterkeys(src_points[0]): values = [point[name] for point in src_points] sample_points.append((name, values)) return sample_points sample_points = traj_to_sample_points(trajectory) trajectory_cube = iris.analysis.trajectory.interpolate(cube, sample_points) self.assertCML(trajectory_cube, ('trajectory', 'constant_latitude.cml')) # Sanity check the results against a simple slice plt.plot(cube[0, 0, 10, :].data) plt.plot(trajectory_cube[0, 0, :].data) self.check_graphic() # Extract a zig-zag trajectory waypoints = [ {'grid_latitude': -0.1188, 'grid_longitude': 359.5886}, {'grid_latitude': -0.0828, 'grid_longitude': 359.6606}, {'grid_latitude': -0.0468, 'grid_longitude': 359.6246}, ] trajectory = iris.analysis.trajectory.Trajectory(waypoints, sample_count=20) sample_points = traj_to_sample_points(trajectory) trajectory_cube = iris.analysis.trajectory.interpolate( cube[0, 0], sample_points) expected = np.array([287.95953369, 287.9190979, 287.95550537, 287.93240356, 287.83850098, 287.87869263, 287.90942383, 287.9463501, 287.74365234, 287.68856812, 287.75588989, 287.54611206, 287.48522949, 287.53356934, 287.60217285, 287.43795776, 287.59701538, 287.52468872, 287.45025635, 287.52716064], dtype=np.float32) self.assertCML(trajectory_cube, ('trajectory', 'zigzag.cml'), checksum=False) self.assertArrayAllClose(trajectory_cube.data, expected, rtol=2.0e-7) # Sanity check the results against a simple slice x = cube.coord('grid_longitude').points y = cube.coord('grid_latitude').points plt.pcolormesh(x, y, cube[0, 0, :, :].data) x = trajectory_cube.coord('grid_longitude').points y = trajectory_cube.coord('grid_latitude').points plt.scatter(x, y, c=trajectory_cube.data) self.check_graphic() @tests.skip_data @tests.skip_plot def test_tri_polar(self): # load data cubes = iris.load(tests.get_data_path(['NetCDF', 'ORCA2', 'votemper.nc'])) cube = cubes[0] # The netCDF file has different data types for the points and # bounds of 'depth'. This wasn't previously supported, so we # emulate that old behaviour. 
cube.coord('depth').bounds = cube.coord('depth').bounds.astype(np.float32) # define a latitude trajectory (put coords in a different order to the cube, just to be awkward) latitudes = list(range(-90, 90, 2)) longitudes = [-90]*len(latitudes) sample_points = [('longitude', longitudes), ('latitude', latitudes)] # extract sampled_cube = iris.analysis.trajectory.interpolate(cube, sample_points) self.assertCML(sampled_cube, ('trajectory', 'tri_polar_latitude_slice.cml')) # turn it upside down for the visualisation plot_cube = sampled_cube[0] plot_cube = plot_cube[::-1, :] plt.clf() plt.pcolormesh(plot_cube.data, vmin=cube.data.min(), vmax=cube.data.max()) plt.colorbar() self.check_graphic() # Try to request linear interpolation. # Not allowed, as we have multi-dimensional coords. self.assertRaises(iris.exceptions.CoordinateMultiDimError, iris.analysis.trajectory.interpolate, cube, sample_points, method="linear") # Try to request unknown interpolation. self.assertRaises(ValueError, iris.analysis.trajectory.interpolate, cube, sample_points, method="linekar") def test_hybrid_height(self): cube = tests.stock.simple_4d_with_hybrid_height() # Put a biggus array on the cube so we can test deferred loading. cube.lazy_data(biggus.NumpyArrayAdapter(cube.data)) traj = (('grid_latitude', [20.5, 21.5, 22.5, 23.5]), ('grid_longitude', [31, 32, 33, 34])) xsec = iris.analysis.trajectory.interpolate(cube, traj, method='nearest') # Check that creating the trajectory hasn't led to the original # data being loaded. self.assertTrue(cube.has_lazy_data()) self.assertCML([cube, xsec], ('trajectory', 'hybrid_height.cml')) if __name__ == '__main__': tests.main()
decvalts/iris
lib/iris/tests/test_trajectory.py
Python
gpl-3.0
9,235
[ "NetCDF" ]
6974de1172dacd031376721656036290095eaf2f91aacbb0e01c6026a19c4a22
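test_trajectory_definition above expects sample 19 of a 21-point sampling over the two-segment line (0,0)-(0,1)-(0,2) to land at lon 1.9, which follows from spacing the samples evenly along the cumulative segment length. Below is a minimal sketch of that interpolation; sample_line is a hypothetical helper written for illustration, not Iris's Trajectory class, and it ignores everything the real class does beyond per-coordinate linear interpolation.

import numpy as np

def sample_line(waypoints, sample_count):
    # Evenly sample a polyline of waypoint dicts by cumulative distance.
    keys = list(waypoints[0])
    pts = np.array([[w[k] for k in keys] for w in waypoints], dtype=float)
    seg = np.sqrt(((pts[1:] - pts[:-1]) ** 2).sum(axis=1))
    dist = np.concatenate([[0.0], np.cumsum(seg)])
    samples = np.linspace(0.0, dist[-1], sample_count)
    return [dict(zip(keys, (np.interp(s, dist, pts[:, i]) for i in range(pts.shape[1]))))
            for s in samples]

pts = sample_line([{'lat': 0, 'lon': 0}, {'lat': 0, 'lon': 1}, {'lat': 0, 'lon': 2}], 21)
assert abs(pts[19]['lon'] - 1.9) < 1e-9   # the value asserted in test_trajectory_definition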
# -*- coding: utf-8 -*- ############################################################################### # lazyflow: data flow based lazy parallel computation framework # # Copyright (C) 2011-2014, the ilastik developers # <team@ilastik.org> # # This program is free software; you can redistribute it and/or # modify it under the terms of the Lesser GNU General Public License # as published by the Free Software Foundation; either version 2.1 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # See the files LICENSE.lgpl2 and LICENSE.lgpl3 for full text of the # GNU Lesser General Public License version 2.1 and 3 respectively. # This information is also available on the ilastik web site at: # http://ilastik.org/license/ ############################################################################### #Python import os from collections import deque import itertools import math import traceback from functools import partial import logging import copy import time logger = logging.getLogger(__name__) #SciPy import numpy, vigra #lazyflow from lazyflow.graph import Operator, InputSlot, OutputSlot, OrderedSignal from lazyflow import roi from lazyflow.roi import sliceToRoi, roiToSlice from lazyflow.request import RequestPool from operators import OpArrayPiper from lazyflow.rtype import SubRegion from generic import OpMultiArrayStacker, popFlagsFromTheKey def zfill_num(n, stop): """ Make int strings same length. >>> zfill_num(1, 100) # len('99') == 2 '01' >>> zfill_num(1, 101) # len('100') == 3 '001' """ return str(n).zfill(len(str(stop - 1))) def makeOpXToMulti(n): """A factory for creating OpXToMulti classes.""" assert n > 0 class OpXToMulti(Operator): category = "Misc" name = "{} Element to Multislot".format(n) if n == 1: inputSlots = [InputSlot('Input')] else: names = list("Input{}".format(zfill_num(i, n)) for i in range(n)) inputSlots = list(InputSlot(name, optional=True) for name in names) outputSlots = [OutputSlot("Outputs", level=1)] def _sorted_inputs(self, filterReady=False): """Returns self.inputs.values() sorted by keys. :param filterReady: only return slots that are ready. """ keys = sorted(self.inputs.keys()) slots = list(self.inputs[k] for k in keys) if filterReady: slots = list(s for s in slots if s.ready()) return slots def _do_assignfrom(self, inslots): for inslot, outslot in zip(inslots, self.outputs['Outputs']): outslot.meta.assignFrom(inslot.meta) def setupOutputs(self): inslots = self._sorted_inputs(filterReady=True) self.outputs["Outputs"].resize(len(inslots)) self._do_assignfrom(inslots) def execute(self, slot, subindex, roi, result): key = roiToSlice(roi.start, roi.stop) index = subindex[0] inslots = self._sorted_inputs(filterReady=True) if index < len(inslots): return inslots[index][key].wait() def propagateDirty(self, islot, subindex, roi): inslots = self._sorted_inputs(filterReady=True) index = inslots.index(islot) self.outputs["Outputs"][index].setDirty(roi) readyslots = list(s for s in inslots[:index] if s.ready()) self._do_assignfrom(readyslots) def setInSlot(self, slot, subindex, roi, value): # Nothing to do here: All inputs are directly connected to an input slot. 
pass return OpXToMulti Op1ToMulti = makeOpXToMulti(1) Op5ToMulti = makeOpXToMulti(5) Op50ToMulti = makeOpXToMulti(50) class OpPixelFeaturesPresmoothed(Operator): name="OpPixelFeaturesPresmoothed" category = "Vigra filter" inputSlots = [InputSlot("Input"), InputSlot("Matrix"), InputSlot("Scales"), InputSlot("FeatureIds")] # The selection of features to compute outputSlots = [OutputSlot("Output"), # The entire block of features as a single image (many channels) OutputSlot("Features", level=1)] # Each feature image listed separately, with feature name provided in metadata # Specify a default set & order for the features we compute DefaultFeatureIds = [ 'GaussianSmoothing', 'LaplacianOfGaussian', 'GaussianGradientMagnitude', 'DifferenceOfGaussians', 'StructureTensorEigenvalues', 'HessianOfGaussianEigenvalues' ] WINDOW_SIZE = 3.5 class InvalidScalesError(Exception): def __init__(self, invalid_scales): self.invalid_scales = invalid_scales def __init__(self, *args, **kwargs): Operator.__init__(self, *args, **kwargs) self.source = OpArrayPiper(parent=self) self.source.inputs["Input"].connect(self.inputs["Input"]) # Give our feature IDs input a default value (connected out of the box, but can be changed) self.inputs["FeatureIds"].setValue( self.DefaultFeatureIds ) def getInvalidScales(self): """ Check each of the scales the user selected against the shape of the input dataset (in space only). Return a list of the selected scales that are too large for the input dataset. .. note:: This function is NOT called automatically. Clients are expected to call it after configuring the operator, before they attempt to execute() the operator. If this function returns a non-empty list of scales, then calling execute() would generate errors. """ invalid_scales = [] for j, scale in enumerate(self.scales): if self.matrix[:,j].any(): tagged_shape = self.Input.meta.getTaggedShape() spatial_axes_shape = filter( lambda (k,v): k in 'xyz', tagged_shape.items() ) spatial_shape = zip( *spatial_axes_shape )[1] if (scale * self.WINDOW_SIZE > numpy.array(spatial_shape)).any(): invalid_scales.append( scale ) return invalid_scales def setupOutputs(self): assert self.Input.meta.getAxisKeys()[-1] == 'c', "This code assumes channel is the last axis" self.scales = self.inputs["Scales"].value self.matrix = self.inputs["Matrix"].value if not isinstance(self.matrix, numpy.ndarray): raise RuntimeError("OpPixelFeatures: Please input a numpy.ndarray as 'Matrix'") dimCol = len(self.scales) dimRow = len(self.inputs["FeatureIds"].value) assert dimRow== self.matrix.shape[0], "Please check the matrix or the scales they are not the same (scales = %r, matrix.shape = %r)" % (self.scales, self.matrix.shape) assert dimCol== self.matrix.shape[1], "Please check the matrix or the scales they are not the same (scales = %r, matrix.shape = %r)" % (self.scales, self.matrix.shape) featureNameArray =[] oparray = [] for j in range(dimRow): oparray.append([]) featureNameArray.append([]) self.newScales = [] for j in range(dimCol): destSigma = 1.0 if self.scales[j] > destSigma: self.newScales.append(destSigma) else: self.newScales.append(self.scales[j]) logger.debug("Replacing scale %f with new scale %f" %(self.scales[j], self.newScales[j])) for i, featureId in enumerate(self.inputs["FeatureIds"].value): if featureId == 'GaussianSmoothing': for j in range(dimCol): oparray[i].append(OpGaussianSmoothing(self)) oparray[i][j].inputs["Input"].connect(self.source.outputs["Output"]) oparray[i][j].inputs["sigma"].setValue(self.newScales[j]) 
featureNameArray[i].append("Gaussian Smoothing (σ=" + str(self.scales[j]) + ")") elif featureId == 'LaplacianOfGaussian': for j in range(dimCol): oparray[i].append(OpLaplacianOfGaussian(self)) oparray[i][j].inputs["Input"].connect(self.source.outputs["Output"]) oparray[i][j].inputs["scale"].setValue(self.newScales[j]) featureNameArray[i].append("Laplacian of Gaussian (σ=" + str(self.scales[j]) + ")") elif featureId == 'StructureTensorEigenvalues': for j in range(dimCol): oparray[i].append(OpStructureTensorEigenvalues(self)) oparray[i][j].inputs["Input"].connect(self.source.outputs["Output"]) # Note: If you need to change the inner or outer scale, # you must make a new feature (with a new feature ID) and # leave this feature here to preserve backwards compatibility oparray[i][j].inputs["innerScale"].setValue(self.newScales[j]) #FIXME, FIXME, FIXME #sigma1 = [x*0.5 for x in self.newScales[j]] #oparray[i][j].inputs["outerScale"].setValue(sigma1) oparray[i][j].inputs["outerScale"].setValue(self.newScales[j]*0.5) featureNameArray[i].append("Structure Tensor Eigenvalues (σ=" + str(self.scales[j]) + ")") elif featureId == 'HessianOfGaussianEigenvalues': for j in range(dimCol): oparray[i].append(OpHessianOfGaussianEigenvalues(self)) oparray[i][j].inputs["Input"].connect(self.source.outputs["Output"]) oparray[i][j].inputs["scale"].setValue(self.newScales[j]) featureNameArray[i].append("Hessian of Gaussian Eigenvalues (σ=" + str(self.scales[j]) + ")") elif featureId == 'GaussianGradientMagnitude': for j in range(dimCol): oparray[i].append(OpGaussianGradientMagnitude(self)) oparray[i][j].inputs["Input"].connect(self.source.outputs["Output"]) oparray[i][j].inputs["sigma"].setValue(self.newScales[j]) featureNameArray[i].append("Gaussian Gradient Magnitude (σ=" + str(self.scales[j]) + ")") elif featureId == 'DifferenceOfGaussians': for j in range(dimCol): oparray[i].append(OpDifferenceOfGaussians(self)) oparray[i][j].inputs["Input"].connect(self.source.outputs["Output"]) # Note: If you need to change sigma0 or sigma1, you must make a new # feature (with a new feature ID) and leave this feature here # to preserve backwards compatibility oparray[i][j].inputs["sigma0"].setValue(self.newScales[j]) #FIXME, FIXME, FIXME #sigma1 = [x*0.66 for x in self.newScales[j]] #oparray[i][j].inputs["sigma1"].setValue(sigma1) oparray[i][j].inputs["sigma1"].setValue(self.newScales[j]*0.66) featureNameArray[i].append("Difference of Gaussians (σ=" + str(self.scales[j]) + ")") channelCount = 0 featureCount = 0 self.Features.resize( 0 ) self.featureOutputChannels = [] channel_names = [] #connect individual operators for i in range(dimRow): for j in range(dimCol): if self.matrix[i,j]: # Feature names are provided via metadata oparray[i][j].outputs["Output"].meta.description = featureNameArray[i][j] # Prepare the individual features featureCount += 1 self.Features.resize( featureCount ) featureMeta = oparray[i][j].outputs["Output"].meta featureChannels = featureMeta.shape[ featureMeta.axistags.index('c') ] if featureChannels == 1: channel_names.append( featureNameArray[i][j] ) else: for feature_channel_index in range(featureChannels): channel_names.append( featureNameArray[i][j] + " [{}]".format(feature_channel_index) ) self.Features[featureCount-1].meta.assignFrom( featureMeta ) self.Features[featureCount-1].meta.axistags["c"].description = "" # Discard any semantics related to the input channels self.Features[featureCount-1].meta.display_mode = "" # Discard any semantics related to the input channels 
self.featureOutputChannels.append( (channelCount, channelCount + featureChannels) ) channelCount += featureChannels if self.matrix.any(): self.maxSigma = 0 #determine maximum sigma for i in range(dimRow): for j in range(dimCol): val=self.matrix[i,j] if val: self.maxSigma = max(self.scales[j],self.maxSigma) self.featureOps = oparray # Output meta is a modified copy of the input meta self.Output.meta.assignFrom(self.Input.meta) self.Output.meta.dtype = numpy.float32 self.Output.meta.axistags["c"].description = "" # Discard any semantics related to the input channels self.Output.meta.display_mode = "grayscale" self.Output.meta.channel_names = channel_names self.Output.meta.shape = self.Input.meta.shape[:-1] + (channelCount,) self.Output.meta.ideal_blockshape = self._get_ideal_blockshape() # FIXME: Features are float, so we need AT LEAST 4 bytes per output channel, # but vigra functions may use internal RAM as well. self.Output.meta.ram_usage_per_requested_pixel = 4.0 * self.Output.meta.shape[-1] def _get_ideal_blockshape(self): tagged_blockshape = self.Output.meta.getTaggedShape() if 't' in tagged_blockshape: # There is no advantage to grouping time slices in a single request. tagged_blockshape['t'] = 1 for k in 'xyz': # There is no natural blockshape for spatial dimensions. if k in tagged_blockshape: tagged_blockshape[k] = 0 input_blockshape = self.Input.meta.ideal_blockshape if input_blockshape is None: input_blockshape = (0,) * len( self.Input.meta.shape ) output_blockshape = tagged_blockshape.values() final_blockshape = numpy.maximum( input_blockshape, output_blockshape ) return tuple( final_blockshape ) def propagateDirty(self, inputSlot, subindex, roi): if inputSlot == self.Input: channelAxis = self.Input.meta.axistags.index('c') numChannels = self.Input.meta.shape[channelAxis] dirtyChannels = roi.stop[channelAxis] - roi.start[channelAxis] # If all the input channels were dirty, the dirty output region is a contiguous block if dirtyChannels == numChannels: dirtyKey = list(roiToSlice(roi.start, roi.stop)) dirtyKey[channelAxis] = slice(None) dirtyRoi = sliceToRoi(dirtyKey, self.Output.meta.shape) self.Output.setDirty(dirtyRoi[0], dirtyRoi[1]) else: # Only some input channels were dirty, # so we must mark each dirty output region separately. numFeatures = self.Output.meta.shape[channelAxis] / numChannels for featureIndex in range(numFeatures): startChannel = numChannels*featureIndex + roi.start[channelAxis] stopChannel = startChannel + roi.stop[channelAxis] dirtyRoi = copy.copy(roi) dirtyRoi.start[channelAxis] = startChannel dirtyRoi.stop[channelAxis] = stopChannel self.Output.setDirty(dirtyRoi) elif (inputSlot == self.Matrix or inputSlot == self.Scales or inputSlot == self.FeatureIds): self.Output.setDirty(slice(None)) else: assert False, "Unknown dirty input slot." def execute(self, slot, subindex, rroi, result): assert slot == self.Features or slot == self.Output if slot == self.Features: key = roiToSlice(rroi.start, rroi.stop) index = subindex[0] key = list(key) channelIndex = self.Input.meta.axistags.index('c') # Translate channel slice to the correct location for the output slot. 
key[channelIndex] = slice(self.featureOutputChannels[index][0] + key[channelIndex].start, self.featureOutputChannels[index][0] + key[channelIndex].stop) rroi = SubRegion(self.Output, pslice=key) # Get output slot region for this channel return self.execute(self.Output, (), rroi, result) elif slot == self.outputs["Output"]: key = rroi.toSlice() logger.debug("OpPixelFeaturesPresmoothed: request %s" % (rroi.pprint(),)) cnt = 0 written = 0 assert (rroi.stop<=self.outputs["Output"].meta.shape).all() flag = 'c' channelAxis=self.inputs["Input"].meta.axistags.index('c') axisindex = channelAxis oldkey = list(key) oldkey.pop(axisindex) inShape = self.inputs["Input"].meta.shape hasChannelAxis = (self.Input.meta.axistags.axisTypeCount(vigra.AxisType.Channels) > 0) #if (self.Input.meta.axistags.axisTypeCount(vigra.AxisType.Channels) == 0): # noChannels = True inAxistags = self.inputs["Input"].meta.axistags shape = self.outputs["Output"].meta.shape axistags = self.outputs["Output"].meta.axistags result = result.view(vigra.VigraArray) result.axistags = copy.copy(axistags) hasTimeAxis = self.inputs["Input"].meta.axistags.axisTypeCount(vigra.AxisType.Time) timeAxis=self.inputs["Input"].meta.axistags.index('t') subkey = popFlagsFromTheKey(key,axistags,'c') subshape=popFlagsFromTheKey(shape,axistags,'c') at2 = copy.copy(axistags) at2.dropChannelAxis() subshape=popFlagsFromTheKey(subshape,at2,'t') subkey = popFlagsFromTheKey(subkey,at2,'t') oldstart, oldstop = roi.sliceToRoi(key, shape) start, stop = roi.sliceToRoi(subkey,subkey) maxSigma = max(0.7,self.maxSigma) #we use 0.7 as an approximation of not doing any smoothing #smoothing was already applied previously # The region of the smoothed image we need to give to the feature filter (in terms of INPUT coordinates) # 0.7, because the features receive a pre-smoothed array and don't need much of a neighborhood vigOpSourceStart, vigOpSourceStop = roi.enlargeRoiForHalo(start, stop, subshape, 0.7, self.WINDOW_SIZE) # The region of the input that we need to give to the smoothing operator (in terms of INPUT coordinates) newStart, newStop = roi.enlargeRoiForHalo(vigOpSourceStart, vigOpSourceStop, subshape, maxSigma, self.WINDOW_SIZE) newStartSmoother = roi.TinyVector(start - vigOpSourceStart) newStopSmoother = roi.TinyVector(stop - vigOpSourceStart) roiSmoother = roi.roiToSlice(newStartSmoother, newStopSmoother) # Translate coordinates (now in terms of smoothed image coordinates) vigOpSourceStart = roi.TinyVector(vigOpSourceStart - newStart) vigOpSourceStop = roi.TinyVector(vigOpSourceStop - newStart) readKey = roi.roiToSlice(newStart, newStop) writeNewStart = start - newStart writeNewStop = writeNewStart + stop - start treadKey=list(readKey) if hasTimeAxis: if timeAxis < channelAxis: treadKey.insert(timeAxis, key[timeAxis]) else: treadKey.insert(timeAxis-1, key[timeAxis]) if self.inputs["Input"].meta.axistags.axisTypeCount(vigra.AxisType.Channels) == 0: treadKey = popFlagsFromTheKey(treadKey,axistags,'c') else: treadKey.insert(channelAxis, slice(None,None,None)) treadKey=tuple(treadKey) req = self.inputs["Input"][treadKey] sourceArray = req.wait() req.clean() #req.result = None req.destination = None if sourceArray.dtype != numpy.float32: sourceArrayF = sourceArray.astype(numpy.float32) try: sourceArray.resize((1,), refcheck = False) except: pass del sourceArray sourceArray = sourceArrayF #if (self.Input.meta.axistags.axisTypeCount(vigra.AxisType.Channels) == 0): #add a channel dimension to make the code afterwards more uniform # sourceArray = 
sourceArray.view(numpy.ndarray) # sourceArray = sourceArray.reshape(sourceArray.shape+(1,)) sourceArrayV = sourceArray.view(vigra.VigraArray) sourceArrayV.axistags = copy.copy(inAxistags) dimCol = len(self.scales) dimRow = self.matrix.shape[0] sourceArraysForSigmas = [None]*dimCol #connect individual operators try: for j in range(dimCol): hasScale = False for i in range(dimRow): if self.matrix[i,j]: hasScale = True if not hasScale: continue destSigma = 1.0 if self.scales[j] > destSigma: tempSigma = math.sqrt(self.scales[j]**2 - destSigma**2) else: destSigma = 0.0 tempSigma = self.scales[j] vigOpSourceShape = list(vigOpSourceStop - vigOpSourceStart) if hasTimeAxis: if timeAxis < channelAxis: vigOpSourceShape.insert(timeAxis, ( oldstop - oldstart)[timeAxis]) else: vigOpSourceShape.insert(timeAxis-1, ( oldstop - oldstart)[timeAxis]) vigOpSourceShape.insert(channelAxis, inShape[channelAxis]) sourceArraysForSigmas[j] = numpy.ndarray(tuple(vigOpSourceShape),numpy.float32) for i,vsa in enumerate(sourceArrayV.timeIter()): droi = (tuple(vigOpSourceStart._asint()), tuple(vigOpSourceStop._asint())) tmp_key = getAllExceptAxis(len(sourceArraysForSigmas[j].shape),timeAxis, i) sourceArraysForSigmas[j][tmp_key] = vigra.filters.gaussianSmoothing(vsa,tempSigma, roi = droi, window_size = self.WINDOW_SIZE ) else: droi = (tuple(vigOpSourceStart._asint()), tuple(vigOpSourceStop._asint())) sourceArraysForSigmas[j] = vigra.filters.gaussianSmoothing(sourceArrayV, sigma = tempSigma, roi = droi, window_size = self.WINDOW_SIZE) except RuntimeError as e: if e.message.find('kernel longer than line') > -1: message = "Feature computation error:\nYour image is too small to apply a filter with sigma=%.1f. Please select features with smaller sigmas." % self.scales[j] raise RuntimeError(message) else: raise e del sourceArrayV try: sourceArray.resize((1,), refcheck = False) except ValueError: # Sometimes this fails, but that's okay. logger.debug("Failed to free array memory.") del sourceArray closures = [] #connect individual operators for i in range(dimRow): for j in range(dimCol): val=self.matrix[i,j] if val: vop= self.featureOps[i][j] oslot = vop.outputs["Output"] req = None #inTagKeys = [ax.key for ax in oslot.meta.axistags] #print inTagKeys, flag if hasChannelAxis: slices = oslot.meta.shape[axisindex] if cnt + slices >= rroi.start[axisindex] and rroi.start[axisindex]-cnt<slices and rroi.start[axisindex]+written<rroi.stop[axisindex]: begin = 0 if cnt < rroi.start[axisindex]: begin = rroi.start[axisindex] - cnt end = slices if cnt + end > rroi.stop[axisindex]: end -= cnt + end - rroi.stop[axisindex] key_ = copy.copy(oldkey) key_.insert(axisindex, slice(begin, end, None)) reskey = [slice(None, None, None) for x in range(len(result.shape))] reskey[axisindex] = slice(written, written+end-begin, None) destArea = result[tuple(reskey)] #readjust the roi for the new source array roiSmootherList = list(roiSmoother) roiSmootherList.insert(axisindex, slice(begin, end, None)) if hasTimeAxis: # The time slice in the ROI doesn't matter: # The sourceArrayParameter below overrides the input data to be used. 
roiSmootherList.insert(timeAxis, 0) roiSmootherRegion = SubRegion(oslot, pslice=roiSmootherList) closure = partial(oslot.operator.execute, oslot, (), roiSmootherRegion, destArea, sourceArray = sourceArraysForSigmas[j]) closures.append(closure) written += end - begin cnt += slices else: if cnt>=rroi.start[axisindex] and rroi.start[axisindex] + written < rroi.stop[axisindex]: reskey = [slice(None, None, None) for x in range(len(result.shape))] slices = oslot.meta.shape[axisindex] reskey[axisindex]=slice(written, written+slices, None) #print "key: ", key, "reskey: ", reskey, "oldkey: ", oldkey, "resshape:", result.shape #print "roiSmoother:", roiSmoother destArea = result[tuple(reskey)] #print "destination area:", destArea.shape logger.debug(oldkey, destArea.shape, sourceArraysForSigmas[j].shape) oldroi = SubRegion(oslot, pslice=oldkey) #print "passing roi:", oldroi closure = partial(oslot.operator.execute, oslot, (), oldroi, destArea, sourceArray = sourceArraysForSigmas[j]) closures.append(closure) written += 1 cnt += 1 pool = RequestPool() for c in closures: r = pool.request(c) pool.wait() pool.clean() for i in range(len(sourceArraysForSigmas)): if sourceArraysForSigmas[i] is not None: try: sourceArraysForSigmas[i].resize((1,)) except: sourceArraysForSigmas[i] = None ###################################################3 class OpPixelFeaturesInterpPresmoothed(Operator): name="OpPixelFeaturesPresmoothed" category = "Vigra filter" inputSlots = [InputSlot("Input"), InputSlot("Matrix"), InputSlot("Scales"), InputSlot("FeatureIds"), InputSlot("InterpolationScaleZ")] # The selection of features to compute outputSlots = [OutputSlot("Output"), # The entire block of features as a single image (many channels) OutputSlot("Features", level=1)] # Each feature image listed separately, with feature name provided in metadata # Specify a default set & order for the features we compute DefaultFeatureIds = [ 'GaussianSmoothing', 'LaplacianOfGaussian', 'StructureTensorEigenvalues', 'HessianOfGaussianEigenvalues', 'GaussianGradientMagnitude', 'DifferenceOfGaussians' ] WINDOW_SIZE = 3.5 def __init__(self, *args, **kwargs): Operator.__init__(self, *args, **kwargs) self.source = OpArrayPiper(parent=self) self.source.inputs["Input"].connect(self.inputs["Input"]) self.stacker = OpMultiArrayStacker(parent=self) self.multi = Op50ToMulti(parent=self) self.stacker.inputs["Images"].connect(self.multi.outputs["Outputs"]) # Give our feature IDs input a default value (connected out of the box, but can be changed) self.inputs["FeatureIds"].setValue( self.DefaultFeatureIds ) def getInvalidScales(self): """ Check each of the scales the user selected against the shape of the input dataset (in space only). Return a list of the selected scales that are too large for the input dataset. .. note:: This function is NOT called automatically. Clients are expected to call it after configuring the operator, before they attempt to execute() the operator. If this function returns a non-empty list of scales, then calling execute() would generate errors. 
""" invalid_scales = [] z_scale = self.InterpolationScaleZ.value tagged_shape = self.Input.meta.getTaggedShape() tagged_shape['z'] = tagged_shape['z']*z_scale spatial_axes_shape = filter( lambda (k,v): k in 'xyz', tagged_shape.items() ) spatial_shape = zip( *spatial_axes_shape )[1] for j, scale in enumerate(self.scales): if self.matrix[:,j].any(): if (scale * self.WINDOW_SIZE > numpy.array(spatial_shape)).any(): invalid_scales.append( scale ) return invalid_scales def setupOutputs(self): if self.inputs["Scales"].connected() and self.inputs["Matrix"].connected(): self.stacker.inputs["Images"].disconnect() self.scales = self.inputs["Scales"].value self.matrix = self.inputs["Matrix"].value if not isinstance(self.matrix, numpy.ndarray): raise RuntimeError("OpPixelFeatures: Please input a numpy.ndarray as 'Matrix'") dimCol = len(self.scales) dimRow = len(self.inputs["FeatureIds"].value) assert dimRow== self.matrix.shape[0], "Please check the matrix or the scales they are not the same (scales = %r, matrix.shape = %r)" % (self.scales, self.matrix.shape) assert dimCol== self.matrix.shape[1], "Please check the matrix or the scales they are not the same (scales = %r, matrix.shape = %r)" % (self.scales, self.matrix.shape) featureNameArray =[] oparray = [] for j in range(dimRow): oparray.append([]) featureNameArray.append([]) self.newScales = [] for j in range(dimCol): destSigma = 1.0 if self.scales[j] > destSigma: self.newScales.append(destSigma) else: self.newScales.append(self.scales[j]) logger.debug("Replacing scale %f with new scale %f" %(self.scales[j], self.newScales[j])) for i, featureId in enumerate(self.inputs["FeatureIds"].value): if featureId == 'GaussianSmoothing': for j in range(dimCol): oparray[i].append(OpGaussianSmoothing(self)) oparray[i][j].inputs["Input"].connect(self.source.outputs["Output"]) oparray[i][j].inputs["sigma"].setValue(self.newScales[j]) featureNameArray[i].append("Gaussian Smoothing (σ=" + str(self.scales[j]) + ")") elif featureId == 'LaplacianOfGaussian': for j in range(dimCol): oparray[i].append(OpLaplacianOfGaussian(self)) oparray[i][j].inputs["Input"].connect(self.source.outputs["Output"]) oparray[i][j].inputs["scale"].setValue(self.newScales[j]) featureNameArray[i].append("Laplacian of Gaussian (σ=" + str(self.scales[j]) + ")") elif featureId == 'StructureTensorEigenvalues': for j in range(dimCol): oparray[i].append(OpStructureTensorEigenvalues(self)) oparray[i][j].inputs["Input"].connect(self.source.outputs["Output"]) # Note: If you need to change the inner or outer scale, # you must make a new feature (with a new feature ID) and # leave this feature here to preserve backwards compatibility oparray[i][j].inputs["innerScale"].setValue(self.newScales[j]) oparray[i][j].inputs["outerScale"].setValue(self.newScales[j]*0.5) featureNameArray[i].append("Structure Tensor Eigenvalues (σ=" + str(self.scales[j]) + ")") elif featureId == 'HessianOfGaussianEigenvalues': for j in range(dimCol): oparray[i].append(OpHessianOfGaussianEigenvalues(self)) oparray[i][j].inputs["Input"].connect(self.source.outputs["Output"]) oparray[i][j].inputs["scale"].setValue(self.newScales[j]) featureNameArray[i].append("Hessian of Gaussian Eigenvalues (σ=" + str(self.scales[j]) + ")") elif featureId == 'GaussianGradientMagnitude': for j in range(dimCol): oparray[i].append(OpGaussianGradientMagnitude(self)) oparray[i][j].inputs["Input"].connect(self.source.outputs["Output"]) oparray[i][j].inputs["sigma"].setValue(self.newScales[j]) featureNameArray[i].append("Gaussian Gradient Magnitude (σ=" + 
str(self.scales[j]) + ")") elif featureId == 'DifferenceOfGaussians': for j in range(dimCol): oparray[i].append(OpDifferenceOfGaussians(self)) oparray[i][j].inputs["Input"].connect(self.source.outputs["Output"]) # Note: If you need to change sigma0 or sigma1, you must make a new # feature (with a new feature ID) and leave this feature here # to preserve backwards compatibility oparray[i][j].inputs["sigma0"].setValue(self.newScales[j]) oparray[i][j].inputs["sigma1"].setValue(self.newScales[j]*0.66) featureNameArray[i].append("Difference of Gaussians (σ=" + str(self.scales[j]) + ")") #disconnecting all Operators for islot in self.multi.inputs.values(): islot.disconnect() channelCount = 0 featureCount = 0 self.Features.resize( 0 ) self.featureOutputChannels = [] #connect individual operators for i in range(dimRow): for j in range(dimCol): if self.matrix[i,j]: # Feature names are provided via metadata oparray[i][j].outputs["Output"].meta.description = featureNameArray[i][j] self.multi.inputs["Input%02d" %(i*dimCol+j)].connect(oparray[i][j].outputs["Output"]) logger.debug("connected Input%02d of self.multi" %(i*dimCol+j)) # Prepare the individual features featureCount += 1 self.Features.resize( featureCount ) featureMeta = oparray[i][j].outputs["Output"].meta featureChannels = featureMeta.shape[ featureMeta.axistags.index('c') ] self.Features[featureCount-1].meta.assignFrom( featureMeta ) self.featureOutputChannels.append( (channelCount, channelCount + featureChannels) ) channelCount += featureChannels #additional connection with FakeOperator if (self.matrix==0).all(): fakeOp = OpGaussianSmoothing(parent=self) fakeOp.inputs["Input"].connect(self.source.outputs["Output"]) fakeOp.inputs["sigma"].setValue(10) self.multi.inputs["Input%02d" %(i*dimCol+j+1)].connect(fakeOp.outputs["Output"]) self.multi.inputs["Input%02d" %(i*dimCol+j+1)].disconnect() stackerShape = list(self.Input.meta.shape) stackerShape[ self.Input.meta.axistags.index('c') ] = 0 self.stacker.Output.meta.shape = tuple(stackerShape) self.stacker.Output.meta.axistags = self.Input.meta.axistags else: self.stacker.inputs["AxisFlag"].setValue('c') self.stacker.inputs["AxisIndex"].setValue(self.source.outputs["Output"].meta.axistags.index('c')) self.stacker.inputs["Images"].connect(self.multi.outputs["Outputs"]) self.maxSigma = 0 #determine maximum sigma for i in range(dimRow): for j in range(dimCol): val=self.matrix[i,j] if val: self.maxSigma = max(self.scales[j],self.maxSigma) self.featureOps = oparray # Output meta is a modified copy of the input meta self.Output.meta.assignFrom(self.Input.meta) self.Output.meta.dtype = numpy.float32 self.Output.meta.axistags = self.stacker.Output.meta.axistags self.Output.meta.shape = self.stacker.Output.meta.shape def propagateDirty(self, inputSlot, subindex, roi): if inputSlot == self.Input: channelAxis = self.Input.meta.axistags.index('c') numChannels = self.Input.meta.shape[channelAxis] dirtyChannels = roi.stop[channelAxis] - roi.start[channelAxis] # If all the input channels were dirty, the dirty output region is a contiguous block if dirtyChannels == numChannels: dirtyKey = roiToSlice(roi.start, roi.stop) dirtyKey[channelAxis] = slice(None) dirtyRoi = sliceToRoi(dirtyKey, self.Output.meta.shape) self.Output.setDirty(dirtyRoi[0], dirtyRoi[1]) else: # Only some input channels were dirty, # so we must mark each dirty output region separately. 
numFeatures = self.Output.meta.shape[channelAxis] / numChannels for featureIndex in range(numFeatures): startChannel = numChannels*featureIndex + roi.start[channelAxis] stopChannel = startChannel + roi.stop[channelAxis] dirtyRoi = copy.copy(roi) dirtyRoi.start[channelAxis] = startChannel dirtyRoi.stop[channelAxis] = stopChannel self.Output.setDirty(dirtyRoi) elif (inputSlot == self.Matrix or inputSlot == self.Scales or inputSlot == self.FeatureIds or inputSlot == self.InterpolationScaleZ): self.Output.setDirty(slice(None)) else: assert False, "Unknown dirty input slot." def execute(self, slot, subindex, rroi, result): assert slot == self.Features or slot == self.Output if slot == self.Features: key = roiToSlice(rroi.start, rroi.stop) index = subindex[0] subslot = self.Features[index] key = list(key) channelIndex = self.Input.meta.axistags.index('c') # Translate channel slice to the correct location for the output slot. key[channelIndex] = slice(self.featureOutputChannels[index][0] + key[channelIndex].start, self.featureOutputChannels[index][0] + key[channelIndex].stop) rroi = SubRegion(self.Output, pslice=key) # Get output slot region for this channel return self.execute(self.Output, (), rroi, result) elif slot == self.outputs["Output"]: key = rroi.toSlice() cnt = 0 written = 0 assert (rroi.stop<=self.outputs["Output"].meta.shape).all() assert self.inputs["Input"].meta.axistags.axisTypeCount(vigra.AxisType.Channels)!=0, "Data without channels is not yet supported" flag = 'c' channelAxis=self.inputs["Input"].meta.axistags.index('c') assert self.inputs["Input"].meta.shape[channelAxis]==1, "Multichannel data is not yet supported" #assert len(self.inputs["Input"].meta.shape)==4, "Only 3d data, as the interpolation is in z" axisindex = channelAxis oldkey = list(key) oldkey.pop(axisindex) inShape = self.inputs["Input"].meta.shape shape = self.outputs["Output"].meta.shape axistags = self.inputs["Input"].meta.axistags result = result.view(vigra.VigraArray) result.axistags = copy.copy(axistags) hasTimeAxis = self.inputs["Input"].meta.axistags.axisTypeCount(vigra.AxisType.Time) timeAxis=self.inputs["Input"].meta.axistags.index('t') subkey = popFlagsFromTheKey(key,axistags,'c') subshape=popFlagsFromTheKey(shape,axistags,'c') at2 = copy.copy(axistags) at2.dropChannelAxis() subshape=popFlagsFromTheKey(subshape,at2,'t') subkey = popFlagsFromTheKey(subkey,at2,'t') oldstart, oldstop = roi.sliceToRoi(key, shape) start, stop = roi.sliceToRoi(subkey,subkey) maxSigma = max(0.7,self.maxSigma) #maxSigma = max(1., self.maxSigma) # The region of the smoothed image we need to give to the feature filter (in terms of INPUT coordinates) # all this has to be done for the interpolated array! zaxis = axistags.index('z') scaleZ = self.InterpolationScaleZ.value newRangeZ = scaleZ*(shape[zaxis]-1)+1 interpShape = list(copy.copy(popFlagsFromTheKey(shape, axistags, 'c'))) interpShape[zaxis] = numpy.long(newRangeZ) #TODO: this insanity can most probably be avoided by using taggedShape #FIXME: we assume that time is first. Whatever. 
if hasTimeAxis: assert timeAxis==0 interpShape = interpShape[1:] interp_start = copy.copy(start) interp_stop = copy.copy(stop) interp_start[zaxis] = scaleZ*interp_start[zaxis] interp_stop[zaxis] = scaleZ*interp_stop[zaxis]-1 vigOpSourceStart, vigOpSourceStop = roi.enlargeRoiForHalo(interp_start, interp_stop, interpShape, 0.7, window = self.WINDOW_SIZE) # The region of the input that we need to give to the smoothing operator (in terms of INPUT coordinates) newStart, newStop = roi.enlargeRoiForHalo(vigOpSourceStart, vigOpSourceStop, interpShape, maxSigma, window = self.WINDOW_SIZE) vigOpOffset = start - vigOpSourceStart newStartSmoother = roi.TinyVector(interp_start - vigOpSourceStart) newStopSmoother = roi.TinyVector(interp_stop - vigOpSourceStart) roiSmoother = roi.roiToSlice(newStartSmoother, newStopSmoother) # Translate coordinates (now in terms of smoothed image coordinates) vigOpSourceStart = roi.TinyVector(vigOpSourceStart - newStart) vigOpSourceStop = roi.TinyVector(vigOpSourceStop - newStart) #adjust the readkey, as we read from the non-interpolated image newStartNI = copy.copy(newStart) newStopNI = copy.copy(newStop) newStartNI[zaxis] = numpy.floor(float(newStart[zaxis])/scaleZ) newStopNI[zaxis] = numpy.ceil(float(newStop[zaxis])/scaleZ) readKey = roi.roiToSlice(newStartNI, newStopNI) #interpolation is applied on a region read with the above key. In x-y it should just read everything newStartI = copy.copy(newStart) newStopI = copy.copy(newStop) newStopI = newStopI - newStartI newStartI = newStartI - newStartI newStartI[zaxis] = newStart[zaxis]-scaleZ*newStartNI[zaxis] newStopI[zaxis] = newStop[zaxis]-scaleZ*newStartNI[zaxis] readKeyInterp = roi.roiToSlice(newStartI, newStopI) writeNewStart = start - newStart writeNewStop = writeNewStart + stop - start treadKey=list(readKey) treadKeyInterp = list(readKeyInterp) if hasTimeAxis: if timeAxis < channelAxis: treadKey.insert(timeAxis, key[timeAxis]) treadKeyInterp.insert(timeAxis, key[timeAxis]) else: treadKey.insert(timeAxis-1, key[timeAxis]) treadKey.insert(timeAxis-1, key[timeAxis]) if self.inputs["Input"].meta.axistags.axisTypeCount(vigra.AxisType.Channels) == 0: treadKey = popFlagsFromTheKey(treadKey,axistags,'c') treadKeyInterp = popFlagsFromTheKey(treadKeyInterp,axistags,'c') else: treadKey.insert(channelAxis, slice(None,None,None)) treadKeyInterp.insert(channelAxis, slice(None,None,None)) treadKey=tuple(treadKey) req = self.inputs["Input"][treadKey] sourceArray = req.wait() #req.result = None req.clean() req.destination = None if sourceArray.dtype != numpy.float32: sourceArrayF = sourceArray.astype(numpy.float32) del sourceArray sourceArray = sourceArrayF sourceArrayV = sourceArray.view(vigra.VigraArray) sourceArrayV.axistags = copy.copy(axistags) ########## new stuff ##################### zaxis = axistags.index('z') scaleZ = self.InterpolationScaleZ.value newRangeZ = scaleZ*(sourceArrayV.shape[zaxis]-1)+1 interpShape = list(sourceArrayV.shape) interpShape[zaxis] = numpy.long(newRangeZ) interpShape = popFlagsFromTheKey(interpShape, axistags, 'c') interpShape = popFlagsFromTheKey(interpShape, at2, 't') #FIXME: this won't work with multichannel data. Don't care for now. 
sourceArrayVInterp = vigra.sampling.resizeVolumeSplineInterpolation(sourceArrayV.squeeze(), shape=interpShape) interpShapeFull = sourceArrayVInterp.shape+(1,) if hasTimeAxis: interpShapeFull = (1,)+interpShapeFull sourceArrayVInterp = numpy.ndarray.reshape(sourceArrayVInterp, interpShapeFull) sourceArrayVInterp.axistags = copy.copy(axistags) sourceArrayVInterp = sourceArrayVInterp[treadKeyInterp] dimCol = len(self.scales) dimRow = self.matrix.shape[0] sourceArraysForSigmas = [None]*dimCol #connect individual operators for j in range(dimCol): hasScale = False for i in range(dimRow): if self.matrix[i,j]: hasScale = True if not hasScale: continue destSigma = 1.0 if self.scales[j] > destSigma: tempSigma = math.sqrt(self.scales[j]**2 - destSigma**2) else: destSigma = 0.0 tempSigma = self.scales[j] vigOpSourceShape = list(vigOpSourceStop - vigOpSourceStart) if hasTimeAxis: if timeAxis < channelAxis: vigOpSourceShape.insert(timeAxis, ( oldstop - oldstart)[timeAxis]) else: vigOpSourceShape.insert(timeAxis-1, ( oldstop - oldstart)[timeAxis]) vigOpSourceShape.insert(channelAxis, inShape[channelAxis]) logger.debug( "vigOpSourceShape: {}".format( vigOpSourceShape ) ) sourceArraysForSigmas[j] = numpy.ndarray(tuple(vigOpSourceShape),numpy.float32) for i,vsa in enumerate(sourceArrayVInterp.timeIter()): droi = (tuple(vigOpSourceStart._asint()), tuple(vigOpSourceStop._asint())) tmp_key = getAllExceptAxis(len(sourceArraysForSigmas[j].shape),timeAxis, i) sourceArraysForSigmas[j][tmp_key] = vigra.filters.gaussianSmoothing(vsa,tempSigma, roi = droi, window_size = self.WINDOW_SIZE ) else: droi = (tuple(vigOpSourceStart._asint()), tuple(vigOpSourceStop._asint())) try: sourceArraysForSigmas[j] = vigra.filters.gaussianSmoothing(sourceArrayVInterp, sigma = tempSigma, roi = droi, window_size = self.WINDOW_SIZE) except RuntimeError: logger.error( "interpolated array: {} {}".format( sourceArrayVInterp.shape, sourceArrayVInterp.axistags ) ) logger.error( "source array: {} {}".format( sourceArrayV.shape, sourceArrayV.axistags ) ) logger.error( "droi: {}".format( droi ) ) raise del sourceArrayV del sourceArrayVInterp try: sourceArray.resize((1,), refcheck = False) except ValueError: # Sometimes this fails, but that's okay. logger.debug("Failed to free array memory.") del sourceArray closures = [] #connect individual operators for i in range(dimRow): for j in range(dimCol): val=self.matrix[i,j] if val: vop= self.featureOps[i][j] oslot = vop.outputs["Output"] req = None inTagKeys = [ax.key for ax in oslot.meta.axistags] if flag in inTagKeys: slices = oslot.meta.shape[axisindex] if cnt + slices >= rroi.start[axisindex] and rroi.start[axisindex]-cnt<slices and rroi.start[axisindex]+written<rroi.stop[axisindex]: begin = 0 if cnt < rroi.start[axisindex]: begin = rroi.start[axisindex] - cnt end = slices if cnt + end > rroi.stop[axisindex]: end -= cnt + end - rroi.stop[axisindex] #call feature computation per slice, only for the original data slices nz = scaleZ*(oldkey[zaxis].stop-oldkey[zaxis].start) roiSmootherList = list(roiSmoother) zrange = range(roiSmootherList[zaxis].start, roiSmootherList[zaxis].stop, scaleZ) for iz, z in enumerate(zrange): #key_ = copy.copy(oldkey) key_ = list(oldkey) key_.insert(axisindex, slice(begin, end, None)) #readjust the roi for the new source array? 
newRoi = copy.copy(roiSmootherList) newRoi.insert(axisindex, slice(begin, end, None)) newRoi[zaxis] = slice(z, z+1, None) newRoi = SubRegion(None, pslice=newRoi) #print "roi smoother:", roiSmoother zStart, zStop = roi.enlargeRoiForHalo((z,), (z+1,), (sourceArraysForSigmas[j].shape[zaxis],), 0.7, self.WINDOW_SIZE) sourceKey = [] sourceKey.insert(axistags.index('x'), slice(None, None, None)) sourceKey.insert(axistags.index('y'), slice(None, None, None)) sourceKey.insert(zaxis, slice(zStart, zStop, None)) reskey = [slice(None, None, None) for x in range(len(result.shape))] reskey[axisindex] = slice(written, written+end-begin, None) reskey[zaxis] = slice(iz, iz+1, None) destArea = result[tuple(reskey)] roi_ = SubRegion(oslot, pslice=key_) #print "passing to filter:", sourceArraysForSigmas[j][0, 0, zStart:zStop, 0] #closure = partial(oslot.operator.execute, oslot, (), roi_, destArea, sourceArray = sourceArraysForSigmas[j][sourceKey]) closure = partial(oslot.operator.execute, oslot, (), newRoi, destArea, sourceArraysForSigmas[j][sourceKey]) closures.append(closure) written += end - begin cnt += slices else: if cnt>=rroi.start[axisindex] and rroi.start[axisindex] + written < rroi.stop[axisindex]: reskey = copy.copy(oldkey) reskey.insert(axisindex, written) #print "key: ", key, "reskey: ", reskey, "oldkey: ", oldkey #print "result: ", result.shape, "inslot:", inSlot.shape destArea = result[tuple(reskey)] logger.debug(oldkey, destArea.shape, sourceArraysForSigmas[j].shape) oldroi = SubRegion(oslot, pslice=oldkey) closure = partial(oslot.operator.execute, oslot, (), oldroi, destArea, sourceArray = sourceArraysForSigmas[j]) closures.append(closure) written += 1 cnt += 1 pool = RequestPool() for c in closures: r = pool.request(c) pool.wait() pool.clean() for i in range(len(sourceArraysForSigmas)): if sourceArraysForSigmas[i] is not None: try: sourceArraysForSigmas[i].resize((1,)) except: sourceArraysForSigmas[i] = None def getAllExceptAxis(ndim,index,slicer): res= [slice(None, None, None)] * ndim res[index] = slicer return tuple(res) class OpBaseVigraFilter(OpArrayPiper): inputSlots = [InputSlot("Input"), InputSlot("sigma", stype = "float")] outputSlots = [OutputSlot("Output")] name = "OpBaseVigraFilter" category = "Vigra filter" vigraFilter = None outputDtype = numpy.float32 inputDtype = numpy.float32 supportsOut = True window_size_feature = 2 window_size_smoother = 3.5 supportsRoi = False supportsWindow = False def execute(self, slot, subindex, rroi, result, sourceArray=None): assert len(subindex) == self.Output.level == 0 key = roiToSlice(rroi.start, rroi.stop) kwparams = {} for islot in self.inputs.values(): if islot.name != "Input": kwparams[islot.name] = islot.value if self.inputs.has_key("sigma"): sigma = self.inputs["sigma"].value elif self.inputs.has_key("scale"): sigma = self.inputs["scale"].value elif self.inputs.has_key("sigma0"): sigma = self.inputs["sigma0"].value elif self.inputs.has_key("innerScale"): sigma = self.inputs["innerScale"].value windowSize = 3.5 if self.supportsWindow: kwparams['window_size']=self.window_size_feature windowSize = self.window_size_smoother largestSigma = max(0.7,sigma) #we use 0.7 as an approximation of not doing any smoothing #smoothing was already applied previously shape = self.outputs["Output"].meta.shape axistags = self.inputs["Input"].meta.axistags hasChannelAxis = self.inputs["Input"].meta.axistags.axisTypeCount(vigra.AxisType.Channels) channelAxis=self.inputs["Input"].meta.axistags.index('c') hasTimeAxis = 
self.inputs["Input"].meta.axistags.axisTypeCount(vigra.AxisType.Time) timeAxis=self.inputs["Input"].meta.axistags.index('t') zAxis = self.inputs["Input"].meta.axistags.index('z') subkey = popFlagsFromTheKey(key,axistags,'c') subshape=popFlagsFromTheKey(shape,axistags,'c') at2 = copy.copy(axistags) at2.dropChannelAxis() subshape=popFlagsFromTheKey(subshape,at2,'t') subkey = popFlagsFromTheKey(subkey,at2,'t') oldstart, oldstop = roi.sliceToRoi(key, shape) start, stop = roi.sliceToRoi(subkey,subkey) if sourceArray is not None and zAxis<len(axistags): if timeAxis>zAxis: subshape[at2.index('z')]=sourceArray.shape[zAxis] else: subshape[at2.index('z')-1]=sourceArray.shape[zAxis] newStart, newStop = roi.enlargeRoiForHalo(start, stop, subshape, 0.7, window = windowSize) readKey = roi.roiToSlice(newStart, newStop) writeNewStart = start - newStart writeNewStop = writeNewStart + stop - start if (writeNewStart == 0).all() and (newStop == writeNewStop).all(): fullResult = True else: fullResult = False writeKey = roi.roiToSlice(writeNewStart, writeNewStop) writeKey = list(writeKey) if timeAxis < channelAxis: writeKey.insert(channelAxis-1, slice(None,None,None)) else: writeKey.insert(channelAxis, slice(None,None,None)) writeKey = tuple(writeKey) #print writeKey channelsPerChannel = self.resultingChannels() if self.supportsRoi is False and largestSigma > 5: logger.warn("WARNING: operator", self.name, "does not support roi !!") i2 = 0 for i in range(int(numpy.floor(1.0 * oldstart[channelAxis]/channelsPerChannel)),int(numpy.ceil(1.0 * oldstop[channelAxis]/channelsPerChannel))): newReadKey = list(readKey) #add channel and time axis if needed if hasTimeAxis: if channelAxis > timeAxis: newReadKey.insert(timeAxis, key[timeAxis]) else: newReadKey.insert(timeAxis-1, key[timeAxis]) if hasChannelAxis: newReadKey.insert(channelAxis, slice(i, i+1, None)) if sourceArray is None: req = self.inputs["Input"][newReadKey] t = req.wait() else: if hasChannelAxis: t = sourceArray[getAllExceptAxis(len(newReadKey),channelAxis,slice(i,i+1,None) )] else: fullkey = [slice(None, None, None)]*len(newReadKey) t = sourceArray[fullkey] t = numpy.require(t, dtype=self.inputDtype) t = t.view(vigra.VigraArray) t.axistags = copy.copy(axistags) t = t.insertChannelAxis() sourceBegin = 0 if oldstart[channelAxis] > i * channelsPerChannel: sourceBegin = oldstart[channelAxis] - i * channelsPerChannel sourceEnd = channelsPerChannel if oldstop[channelAxis] < (i+1) * channelsPerChannel: sourceEnd = channelsPerChannel - ((i+1) * channelsPerChannel - oldstop[channelAxis]) destBegin = i2 destEnd = i2 + sourceEnd - sourceBegin if channelsPerChannel>1: tkey=getAllExceptAxis(len(shape),channelAxis,slice(destBegin,destEnd,None)) resultArea = result[tkey] else: tkey=getAllExceptAxis(len(shape),channelAxis,slice(i2,i2+1,None)) resultArea = result[tkey] i2 += destEnd-destBegin supportsOut = self.supportsOut if (destEnd-destBegin != channelsPerChannel): supportsOut = False supportsOut = False #disable for now due to vigra crashes! 
#FIXME for step,image in enumerate(t.timeIter()): nChannelAxis = channelAxis - 1 if timeAxis > channelAxis or not hasTimeAxis: nChannelAxis = channelAxis twriteKey=getAllExceptAxis(image.ndim, nChannelAxis, slice(sourceBegin,sourceEnd,None)) if hasTimeAxis > 0: tresKey = getAllExceptAxis(resultArea.ndim, timeAxis, step) else: tresKey = slice(None, None,None) #print tresKey, twriteKey, resultArea.shape, temp.shape vres = resultArea[tresKey] if supportsOut: if self.supportsRoi: vroi = (tuple(writeNewStart._asint()), tuple(writeNewStop._asint())) try: vres = vres.view(vigra.VigraArray) vres.axistags = copy.copy(image.axistags) logger.debug( "FAST LANE {} {} {} {}".format( self.name, vres.shape, image[twriteKey].shape, vroi ) ) temp = self.vigraFilter(image[twriteKey], roi = vroi,out=vres, **kwparams) except: logger.error( "{} {} {} {}".format(self.name, image.shape, vroi, kwparams) ) raise else: try: temp = self.vigraFilter(image, **kwparams) except: logger.error( "{} {} {} {}".format(self.name, image.shape, vroi, kwparams) ) raise temp=temp[writeKey] else: if self.supportsRoi: vroi = (tuple(writeNewStart._asint()), tuple(writeNewStop._asint())) try: temp = self.vigraFilter(image, roi = vroi, **kwparams) except Exception, e: logger.error( "EXCEPT 2.1 {} {} {} {}".format( self.name, image.shape, vroi, kwparams ) ) traceback.print_exc(e) import sys sys.exit(1) else: try: temp = self.vigraFilter(image, **kwparams) except Exception, e: logger.error( "EXCEPT 2.2 {} {} {} {}".format( self.name, image.shape, kwparams ) ) traceback.print_exc(e) sys.exit(1) temp=temp[writeKey] try: vres[:] = temp[twriteKey] except: logger.error( "EXCEPT3 {} {} {}".format( vres.shape, temp.shape, twriteKey ) ) logger.error( "EXCEPT3 {} {} {}".format( resultArea.shape, tresKey, twriteKey ) ) logger.error( "EXCEPT3 {} {} {}".format( step, t.shape, timeAxis ) ) raise #print "(in.min=",image.min(),",in.max=",image.max(),") (vres.min=",vres.min(),",vres.max=",vres.max(),")" def setupOutputs(self): # Output meta starts with a copy of the input meta, which is then modified self.Output.meta.assignFrom(self.Input.meta) numChannels = 1 inputSlot = self.inputs["Input"] if inputSlot.meta.axistags.axisTypeCount(vigra.AxisType.Channels) > 0: channelIndex = self.inputs["Input"].meta.axistags.channelIndex numChannels = self.inputs["Input"].meta.shape[channelIndex] inShapeWithoutChannels = popFlagsFromTheKey( self.inputs["Input"].meta.shape,self.inputs["Input"].meta.axistags,'c') else: inShapeWithoutChannels = inputSlot.meta.shape channelIndex = len(inputSlot.meta.shape) self.outputs["Output"].meta.dtype = self.outputDtype p = self.inputs["Input"].partner at = copy.copy(inputSlot.meta.axistags) if at.axisTypeCount(vigra.AxisType.Channels) == 0: at.insertChannelAxis() self.outputs["Output"].meta.axistags = at channelsPerChannel = self.resultingChannels() inShapeWithoutChannels = list(inShapeWithoutChannels) inShapeWithoutChannels.insert(channelIndex,numChannels * channelsPerChannel) self.outputs["Output"].meta.shape = tuple(inShapeWithoutChannels) if self.outputs["Output"].meta.axistags.axisTypeCount(vigra.AxisType.Channels) == 0: self.outputs["Output"].meta.axistags.insertChannelAxis() # The output data range is not necessarily the same as the input data range. 
if 'drange' in self.Output.meta: del self.Output.meta['drange'] def resultingChannels(self): raise RuntimeError('resultingChannels() not implemented') #difference of Gaussians def differenceOfGausssians(image,sigma0, sigma1,window_size, roi, out = None): """ difference of gaussian function""" return (vigra.filters.gaussianSmoothing(image,sigma0,window_size=window_size,roi = roi)-vigra.filters.gaussianSmoothing(image,sigma1,window_size=window_size,roi = roi)) def firstHessianOfGaussianEigenvalues(image, **kwargs): return vigra.filters.hessianOfGaussianEigenvalues(image, **kwargs)[...,0:1] def coherenceOrientationOfStructureTensor(image,sigma0, sigma1, window_size, out = None): """ coherence Orientation of Structure tensor function: input: M*N*1ch VigraArray sigma corresponding to the inner scale of the tensor scale corresponding to the outher scale of the tensor output: M*N*2 VigraArray, the firest channel correspond to coherence the second channel correspond to orientation """ #FIXME: make more general #assert image.spatialDimensions==2, "Only implemented for 2 dimensional images" assert len(image.shape)==2 or (len(image.shape)==3 and image.shape[2] == 1), "Only implemented for 2 dimensional images" st=vigra.filters.structureTensor(image, sigma0, sigma1, window_size = window_size) i11=st[:,:,0] i12=st[:,:,1] i22=st[:,:,2] if out is not None: assert out.shape[0] == image.shape[0] and out.shape[1] == image.shape[1] and out.shape[2] == 2 res = out else: res=numpy.ndarray((image.shape[0],image.shape[1],2)) res[:,:,0]=numpy.sqrt( (i22-i11)**2+4*(i12**2))/(i11-i22) res[:,:,1]=numpy.arctan(2*i12/(i22-i11))/numpy.pi +0.5 return res class OpDifferenceOfGaussians(OpBaseVigraFilter): name = "DifferenceOfGaussians" vigraFilter = staticmethod(differenceOfGausssians) outputDtype = numpy.float32 supportsOut = False supportsWindow = True supportsRoi = True inputSlots = [InputSlot("Input"), InputSlot("sigma0", stype = "float"), InputSlot("sigma1", stype = "float")] def resultingChannels(self): return 1 class OpGaussianSmoothing(OpBaseVigraFilter): name = "GaussianSmoothing" vigraFilter = staticmethod(vigra.filters.gaussianSmoothing) outputDtype = numpy.float32 supportsRoi = True supportsWindow = True supportsOut = True def resultingChannels(self): return 1 class OpHessianOfGaussianEigenvalues(OpBaseVigraFilter): name = "HessianOfGaussianEigenvalues" vigraFilter = staticmethod(vigra.filters.hessianOfGaussianEigenvalues) outputDtype = numpy.float32 supportsRoi = True supportsWindow = True supportsOut = True inputSlots = [InputSlot("Input"), InputSlot("scale", stype = "float")] def resultingChannels(self): temp = self.inputs["Input"].meta.axistags.axisTypeCount(vigra.AxisType.Space) return temp class OpStructureTensorEigenvalues(OpBaseVigraFilter): name = "StructureTensorEigenvalues" vigraFilter = staticmethod(vigra.filters.structureTensorEigenvalues) outputDtype = numpy.float32 supportsRoi = True supportsWindow = True supportsOut = True inputSlots = [InputSlot("Input"), InputSlot("innerScale", stype = "float"),InputSlot("outerScale", stype = "float")] def resultingChannels(self): temp = self.inputs["Input"].meta.axistags.axisTypeCount(vigra.AxisType.Space) return temp class OpHessianOfGaussianEigenvaluesFirst(OpBaseVigraFilter): name = "First Eigenvalue of Hessian Matrix" vigraFilter = staticmethod(firstHessianOfGaussianEigenvalues) outputDtype = numpy.float32 supportsOut = False supportsWindow = True supportsRoi = True inputSlots = [InputSlot("Input"), InputSlot("scale", stype = "float")] def 
resultingChannels(self): return 1 class OpHessianOfGaussian(OpBaseVigraFilter): name = "HessianOfGaussian" vigraFilter = staticmethod(vigra.filters.hessianOfGaussian) outputDtype = numpy.float32 supportsWindow = True supportsRoi = True supportsOut = True def resultingChannels(self): temp = self.inputs["Input"].meta.axistags.axisTypeCount(vigra.AxisType.Space)*(self.inputs["Input"].meta.axistags.axisTypeCount(vigra.AxisType.Space) + 1) / 2 return temp class OpGaussianGradientMagnitude(OpBaseVigraFilter): name = "GaussianGradientMagnitude" vigraFilter = staticmethod(vigra.filters.gaussianGradientMagnitude) outputDtype = numpy.float32 supportsRoi = True supportsWindow = True supportsOut = True def resultingChannels(self): return 1 class OpLaplacianOfGaussian(OpBaseVigraFilter): name = "LaplacianOfGaussian" vigraFilter = staticmethod(vigra.filters.laplacianOfGaussian) outputDtype = numpy.float32 supportsOut = True supportsRoi = True supportsWindow = True inputSlots = [InputSlot("Input"), InputSlot("scale", stype = "float")] def resultingChannels(self): return 1 class OpImageReader(Operator): """ Read an image using vigra.impex.readImage(). Supports 2D images (output as xyc) and also multi-page tiffs (output as zyxc). """ Filename = InputSlot(stype="filestring") Image = OutputSlot() def setupOutputs(self): filename = self.Filename.value info = vigra.impex.ImageInfo(filename) assert [tag.key for tag in info.getAxisTags()] == ['x', 'y', 'c'] shape_xyc = info.getShape() self.Image.meta.dtype = info.getDtype() self.Image.meta.prefer_2d = True numImages = vigra.impex.numberImages(filename) if numImages == 1: # For 2D, we use order xyc. self.Image.meta.shape = shape_xyc self.Image.meta.axistags = info.getAxisTags() else: # For 3D, we use reverse order: zyxc # Insert z-axis shape shape_zyxc = (numImages,) + tuple(reversed(shape_xyc[:-1])) + shape_xyc[-1:] self.Image.meta.shape = shape_zyxc # Insert z tag z_tag = vigra.defaultAxistags('z')[0] tags_xyc = [tag for tag in info.getAxisTags()] tags_zyxc = [z_tag] + list(reversed(tags_xyc[:-1])) + tags_xyc[-1:] self.Image.meta.axistags = vigra.AxisTags( tags_zyxc ) def execute(self, slot, subindex, rroi, result): filename = self.Filename.value if 'z' in self.Image.meta.getAxisKeys(): # Copy from each image slice into the corresponding slice of the result. roi_zyxc = numpy.array( [rroi.start, rroi.stop] ) for z_global, z_result in zip( range(*roi_zyxc[:,0]), range(result.shape[0]) ): full_slice = vigra.impex.readImage(filename, index=z_global) full_slice = full_slice.transpose(1,0,2) # xyc -> yxc assert full_slice.shape == self.Image.meta.shape[1:] result[z_result] = full_slice[roiToSlice( *roi_zyxc[:,1:] )] else: full_slice = vigra.impex.readImage(filename) assert full_slice.shape == self.Image.meta.shape roi_xyc = numpy.array( [rroi.start, rroi.stop] ) result[:] = full_slice[roiToSlice( *roi_xyc )] return result def propagateDirty(self, slot, subindex, roi): if slot == self.Filename: self.Image.setDirty() else: assert False, "Unknown dirty input slot."
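The OpPixelFeaturesPresmoothed operators above rely on the Gaussian cascade identity: smoothing with sigma_a and then sigma_b is equivalent to a single smoothing with sqrt(sigma_a**2 + sigma_b**2). That is why execute() pre-smooths the input with tempSigma = sqrt(scale**2 - destSigma**2) and the individual feature operators then run at the small destSigma of 1.0. Below is a minimal standalone sketch of that decomposition; it is not part of the original file, and the image shape and the choice of gaussianGradientMagnitude as the feature are arbitrary illustration values.

import math
import numpy
import vigra

# Toy 2D image; the real operator works on lazily requested ROIs instead.
image = vigra.taggedView(numpy.random.rand(64, 64).astype(numpy.float32), 'xy')

scale = 3.5        # requested feature scale
dest_sigma = 1.0   # destSigma: scale at which the feature filter actually runs
temp_sigma = math.sqrt(scale**2 - dest_sigma**2)   # tempSigma: pre-smoothing

presmoothed = vigra.filters.gaussianSmoothing(image, temp_sigma)
# Effective scale of the result is sqrt(temp_sigma**2 + dest_sigma**2) == scale.
feature = vigra.filters.gaussianGradientMagnitude(presmoothed, dest_sigma)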
stuarteberg/lazyflow
lazyflow/operators/vigraOperators.py
Python
lgpl-3.0
76,465
[ "Gaussian" ]
63389160d5bef42e81be3ecf5d74a764e5babdff7eac64518b78eb02984fd242
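The operators in the file above also enlarge every requested ROI by a halo before reading the input, so that pixels near the border of the requested region see enough neighborhood for a filter of the given sigma (hence the WINDOW_SIZE = 3.5 constant). The sketch below is a hypothetical stand-in approximating the semantics of lazyflow's roi.enlargeRoiForHalo, not the library's actual implementation; the helper name and the concrete numbers are made up for illustration.

import numpy

def enlarge_roi_for_halo(start, stop, shape, sigma, window_size=3.5):
    # Pad the (start, stop) region by ceil(sigma * window_size) on every axis,
    # clipped to the image shape, so the filter has a valid neighborhood.
    halo = int(numpy.ceil(sigma * window_size))
    start = numpy.maximum(numpy.asarray(start) - halo, 0)
    stop = numpy.minimum(numpy.asarray(stop) + halo, numpy.asarray(shape))
    return start, stop

# Requesting pixels (10..20, 10..20) of a 64x64 image for a sigma=1.6 filter
# reads (4..26, 4..26) instead; the extra margin is cropped off after filtering.
start, stop = enlarge_roi_for_halo((10, 10), (20, 20), (64, 64), sigma=1.6)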
''' Module implementing the CUDA "standalone" device. ''' import os import inspect from collections import defaultdict, Counter import tempfile from distutils import ccompiler import re from itertools import chain import numpy as np from brian2.codegen.cpp_prefs import get_compiler_and_args from brian2.codegen.translation import make_statements from brian2.core.clocks import Clock, defaultclock from brian2.core.namespace import get_local_namespace from brian2.core.preferences import prefs, PreferenceError from brian2.core.variables import ArrayVariable, DynamicArrayVariable from brian2.parsing.rendering import CPPNodeRenderer from brian2.devices.device import all_devices from brian2.synapses.synapses import Synapses, SynapticPathway from brian2.utils.filetools import copy_directory, ensure_directory from brian2.utils.stringtools import get_identifiers, stripped_deindented_lines from brian2.codegen.generators.cpp_generator import c_data_type from brian2.utils.logger import get_logger from brian2.units import second from brian2.devices.cpp_standalone.device import CPPWriter, CPPStandaloneDevice from brian2.input.spikegeneratorgroup import SpikeGeneratorGroup from brian2cuda.utils.stringtools import replace_floating_point_literals from brian2cuda.utils.gputools import select_gpu, get_nvcc_path from brian2cuda.utils.logger import report_issue_message from .codeobject import CUDAStandaloneCodeObject, CUDAStandaloneAtomicsCodeObject __all__ = [] logger = get_logger('brian2.devices.cuda_standalone') class CUDAWriter(CPPWriter): def __init__(self, project_dir): self.project_dir = project_dir self.source_files = set() self.header_files = set() def write(self, filename, contents): logger.diagnostic(f'Writing file {filename}:\n{contents}') if filename.lower().endswith('.cu'): self.source_files.add(filename) if filename.lower().endswith('.cpp'): self.source_files.add(filename) elif filename.lower().endswith('.h'): self.header_files.add(filename) elif filename.endswith('.*'): self.write(filename[:-1]+'cu', contents.cu_file) self.write(filename[:-1]+'h', contents.h_file) return fullfilename = os.path.join(self.project_dir, filename) if os.path.exists(fullfilename): if open(fullfilename, 'r').read()==contents: return open(fullfilename, 'w').write(contents) class CUDAStandaloneDevice(CPPStandaloneDevice): ''' The `Device` used for CUDA standalone simulations. ''' def __init__(self): super(CUDAStandaloneDevice, self).__init__() ### Reset variables we don't need from CPPStandaloneDevice.__init__() # remove randomkit, which we don't use for CUDA Standalone self.include_dirs.remove('brianlib/randomkit') self.library_dirs.remove('brianlib/randomkit') # Add code line slots used in our benchmarks # TODO: Add to brian2 and remove here self.code_lines.update({'before_run': [], 'after_run': []}) ### Attributes specific to CUDAStandaloneDevice: # only true during first run call (relevant for synaptic pre/post ID deletion) self.first_run = True # the minimal supported GPU compute capability self.minimal_compute_capability = 3.5 # store the ID of the used GPU and it's compute capability self.gpu_id = None self.compute_capability = None # list of pre/post ID arrays that are not needed in device memory self.delete_synaptic_pre = {} self.delete_synaptic_post = {} # The following nested dictionary collects all codeobjects that use random # number generation (RNG). 
self.codeobjects_with_rng = { # All codeobjects that use the curand device api (binomial and # poisson with vectorized lambda) "device_api": { # This collects all codeobjects that run evey cycle of a clock "every_tick": [], # This collects all codeobjects that are running only once "single_tick": [] }, # All codeobjects that use the curand host api (rand, randn and poisson with # scalar lambda) "host_api": { # Dictionary of lists of codeobjects. Dictionary keys are the RNG types # (rand, randn, poisson_<idx>). For each `poisson(lamda)` function with # a different scalar lamda per codeobject, a new `poisson_<idx>` key is # added to the defaultdict. The dictionary values are lists of # codeobject. "all_runs": defaultdict(list), # Lists of defaultdict(list) with same structure as "all_runs", but now # codeobjects are seperate by `brian2.run()` calls (one list item per # run). "per_run": [] }, } # Dictionary to look up `lambda` values for all `poisson` calls with scalar # `lambda`, sorted by codeobj.name and poisson_name (`poisson-<idx>`): # all_poisson_lamdas[codeobj.name][poisson_name] = lamda self.all_poisson_lamdas = defaultdict(dict) def get_array_name(self, var, access_data=True, prefix=None): ''' Return a globally unique name for `var`. Parameters ---------- access_data : bool, optional For `DynamicArrayVariable` objects, specifying `True` here means the name for the underlying data is returned. If specifying `False`, the name of object itself is returned (e.g. to allow resizing). prefix: {'_ptr', 'dev', 'd'}, optional Prefix for array name. Host pointers to device memory are prefixed with `dev`, device pointers to device memory are prefixed with `d` and pointers used in `scalar_code` and `vector_code` are prefixed with `_ptr` (independent of whether they are used in host or device code). The `_ptr` variables are declared as parameters in the kernel definition (KERNEL_PARAMETERS). ''' # In single-precision mode we replace dt variables in codeobjects with # a single precision version, for details see #148 if hasattr(var, 'real_var'): return self.get_array_name(var.real_var, access_data=access_data, prefix=prefix) prefix = prefix or '' choices = ['_ptr', 'dev', 'd', ''] if prefix not in choices: raise ValueError( f"`prefix` has to be one of {choices} or `None`, got {prefix}" ) if not access_data and prefix in ['_ptr', 'd']: raise ValueError( f"Don't use `'{prefix}'` prefix for a dynamic array object." ) array_name = '' if isinstance(var, DynamicArrayVariable): if access_data: array_name = self.arrays[var] elif var.ndim == 1: array_name = self.dynamic_arrays[var] else: array_name = self.dynamic_arrays_2d[var] elif isinstance(var, ArrayVariable): array_name = self.arrays[var] else: raise TypeError(('Do not have a name for variable of type ' '%s') % type(var)) return prefix + array_name def code_object_class(self, codeobj_class=None, fallback_pref=None): ''' Return `CodeObject` class (either `CUDAStandaloneCodeObject` class or input) Parameters ---------- codeobj_class : a `CodeObject` class, optional If this is keyword is set to None or no arguments are given, this method will return the default (`CUDAStandaloneCodeObject` class). fallback_pref : str, optional For the cuda_standalone device this option is ignored. 
Returns ------- codeobj_class : class The `CodeObject` class that should be used ''' # Ignore the requested pref (used for optimization in runtime) if codeobj_class is None: return CUDAStandaloneCodeObject else: return codeobj_class def get_array_read_write(self, abstract_code, variables): ################################################################## # This code is copied from CodeGenerator.translate() and # CodeGenerator.array_read_write() and should give us a set of # variables to which will be written or from which will be read in # `vector_code` ################################################################## vector_statements = {} for ac_name, ac_code in abstract_code.items(): statements = make_statements(ac_code, variables, prefs['core.default_float_dtype'], optimise=True, blockname=ac_name) _, vector_statements[ac_name] = statements read = set() write = set() for statements in vector_statements.values(): for stmt in statements: ids = get_identifiers(stmt.expr) # if the operation is inplace this counts as a read. if stmt.inplace: ids.add(stmt.var) read = read.union(ids) write.add(stmt.var) read = set(varname for varname, var in variables.items() if isinstance(var, ArrayVariable) and varname in read) write = set(varname for varname, var in variables.items() if isinstance(var, ArrayVariable) and varname in write) return read, write def code_object(self, owner, name, abstract_code, variables, template_name, variable_indices, codeobj_class=None, template_kwds=None, override_conditional_write=None, compiler_kwds=None): if prefs['core.default_float_dtype'] == np.float32 and 'dt' in variables: # In single-precision mode we replace dt variables in codeobjects with # a single precision version, for details see #148 dt_var = variables['dt'] new_dt_var = ArrayVariable(dt_var.name, dt_var.owner, dt_var.size, dt_var.device, dimensions=dt_var.dim, dtype=np.float32, constant=dt_var.constant, scalar=dt_var.scalar, read_only=dt_var.read_only, dynamic=dt_var.dynamic, unique=dt_var.unique) new_dt_var.real_var = dt_var new_dt_var.set_value(dt_var.get_value().item()) variables['dt'] = new_dt_var if template_kwds is None: template_kwds = dict() else: template_kwds = dict(template_kwds) template_kwds['profiled'] = self.enable_profiling template_kwds['bundle_mode'] = prefs["devices.cuda_standalone.push_synapse_bundles"] no_or_const_delay_mode = False if isinstance(owner, (SynapticPathway, Synapses)) and "delay" in owner.variables and owner.variables["delay"].scalar: # catches Synapses(..., delay=...) syntax, does not catch the case when no delay is specified at all no_or_const_delay_mode = True template_kwds["no_or_const_delay_mode"] = no_or_const_delay_mode # Check if pre/post IDs are needed per synapse # This is the case when presynapic/postsynapitc variables are used in synapses code or # if they are used to set synaptic variables after the first run call. # If in at least one `synapses` template for the same Synapses # object or in a `run_regularly` call (creates a `statupdate`) a # pre/post IDs are needed, we don't delete them. And if a synapses object that is only # run once after the first run call (e.g. syn.w['i<j'] = ...), we don't delete either. 
# If deleted, they will be deleted on the device in `run_lines` (see below) synapses_object_every_tick = False synapses_object_single_tick_after_run = False if isinstance(owner, Synapses): if template_name in ['synapses', 'stateupdate', 'summed_variable']: synapses_object_every_tick = True if not self.first_run and template_name in ['group_variable_set_conditional', 'group_variable_set']: synapses_object_single_tick_after_run = True if synapses_object_every_tick or synapses_object_single_tick_after_run: read, write = self.get_array_read_write(abstract_code, variables) read_write = read.union(write) synaptic_pre_array_name = self.get_array_name(owner.variables['_synaptic_pre'], False) synaptic_post_array_name = self.get_array_name(owner.variables['_synaptic_post'], False) if synaptic_pre_array_name not in self.delete_synaptic_pre.keys(): self.delete_synaptic_pre[synaptic_pre_array_name] = True if synaptic_post_array_name not in self.delete_synaptic_post.keys(): self.delete_synaptic_post[synaptic_post_array_name] = True error_msg = ("'devices.cuda_standalone.no_{prepost}_references' " "was set to True, but {prepost}synaptic index is " "needed for variable {varname} in {owner.name}") # Check for all variable that are read or written to if they are # i/j or their indices are pre/post for varname in variables.keys(): if varname in read_write: idx = variable_indices[varname] if idx == '_presynaptic_idx' or varname == 'i': self.delete_synaptic_pre[synaptic_pre_array_name] = False if prefs['devices.cuda_standalone.no_pre_references']: raise PreferenceError(error_msg.format(prepost='pre', varname=varname, owner=owner)) if idx == '_postsynaptic_idx' or varname == 'j': self.delete_synaptic_post[synaptic_post_array_name] = False if prefs['devices.cuda_standalone.no_post_references']: raise PreferenceError(error_msg.format(prepost='post', varname=varname, owner=owner)) # Summed variables need the indices of their target variable, which # are not in the read_write set. 
if template_name == 'summed_variable': idx = template_kwds['_index_var'].name varname = template_kwds['_target_var'].name if idx == '_synaptic_pre': self.delete_synaptic_pre[synaptic_pre_array_name] = False if prefs['devices.cuda_standalone.no_pre_references']: raise PreferenceError(error_msg.format(prepost='pre', varname=varname, owner=owner)) if idx == '_synaptic_post': self.delete_synaptic_post[synaptic_post_array_name] = False if prefs['devices.cuda_standalone.no_post_references']: raise PreferenceError(error_msg.format(prepost='post', varname=varname, owner=owner)) if idx == '_syaptic_post': self.delete_synaptic_post[synaptic_post_array_name] = False if template_name == "synapses": prepost = template_kwds['pathway'].prepost synaptic_effects = "synapse" for varname in variables.keys(): if varname in write: idx = variable_indices[varname] if (prepost == 'pre' and idx == '_postsynaptic_idx') or (prepost == 'post' and idx == '_presynaptic_idx'): # The SynapticPathways 'target' group variables are modified if synaptic_effects == "synapse": synaptic_effects = "target" if (prepost == 'pre' and idx == '_presynaptic_idx') or (prepost == 'post' and idx == '_postsynaptic_idx'): # The SynapticPathways 'source' group variables are modified synaptic_effects = "source" template_kwds["synaptic_effects"] = synaptic_effects logger.debug(f"Synaptic effects of Synapses object {name} modify {synaptic_effects} group variables.") # use atomics if possible (except for `synapses` mode, where we cann parallelise without) # TODO: this overwrites if somebody sets a codeobject in the Synapses(..., codeobj_class=...) if prefs['devices.cuda_standalone.use_atomics'] and synaptic_effects != 'synapses': codeobj_class = CUDAStandaloneAtomicsCodeObject logger.debug( f"Using atomics in synaptic effect application of Synapses object " f"{name}" ) threads_expr = prefs.devices.cuda_standalone.threads_per_synapse_bundle pathway_name = template_kwds['pathway'].name replace_expr = { '{mean}': f'{pathway_name}_bundle_size_mean', '{std}': f'{pathway_name}_bundle_size_std', '{min}': f'{pathway_name}_bundle_size_min', '{max}': f'{pathway_name}_bundle_size_max', } for old, new in replace_expr.items(): threads_expr = threads_expr.replace(old, new) template_kwds["threads_per_synapse_bundle"] = threads_expr template_kwds["bundle_threads_warp_multiple"] = prefs.devices.cuda_standalone.bundle_threads_warp_multiple if template_name in ["synapses_create_generator", "synapses_create_array"]: if owner.multisynaptic_index is not None: template_kwds["multisynaptic_idx_var"] = owner.variables[owner.multisynaptic_index] template_kwds["no_pre_references"] = False template_kwds["no_post_references"] = False if prefs['devices.cuda_standalone.no_pre_references']: template_kwds["no_pre_references"] = True if prefs['devices.cuda_standalone.no_post_references']: template_kwds["no_post_references"] = True template_kwds["launch_bounds"] = prefs["devices.cuda_standalone.launch_bounds"] template_kwds["sm_multiplier"] = prefs["devices.cuda_standalone.SM_multiplier"] template_kwds["syn_launch_bounds"] = prefs["devices.cuda_standalone.syn_launch_bounds"] template_kwds["calc_occupancy"] = prefs["devices.cuda_standalone.calc_occupancy"] if template_name in ["threshold", "spikegenerator"]: template_kwds["extra_threshold_kernel"] = prefs["devices.cuda_standalone.extra_threshold_kernel"] codeobj = super(CUDAStandaloneDevice, self).code_object(owner, name, abstract_code, variables, template_name, variable_indices, codeobj_class=codeobj_class, 
template_kwds=template_kwds, override_conditional_write=override_conditional_write, compiler_kwds=compiler_kwds, ) return codeobj def check_openmp_compatible(self, nb_threads): if nb_threads > 0: raise NotImplementedError("Using OpenMP in a CUDA standalone project is not supported") def generate_objects_source(self, writer, arange_arrays, synapses, static_array_specs, networks): sm_multiplier = prefs.devices.cuda_standalone.SM_multiplier num_parallel_blocks = prefs.devices.cuda_standalone.parallel_blocks curand_generator_type = prefs.devices.cuda_standalone.random_number_generator_type curand_generator_ordering = prefs.devices.cuda_standalone.random_number_generator_ordering self.eventspace_arrays = {} self.spikegenerator_eventspaces = [] for var, varname in self.arrays.items(): if var.name.endswith('space'): # get all eventspace variables self.eventspace_arrays[var] = varname #if hasattr(var, 'owner') and isinstance(v.owner, Clock): if isinstance(var.owner, SpikeGeneratorGroup): self.spikegenerator_eventspaces.append(varname) for var in self.eventspace_arrays.keys(): del self.arrays[var] multisyn_vars = [] for syn in synapses: if syn.multisynaptic_index is not None: multisyn_vars.append(syn.variables[syn.multisynaptic_index]) arr_tmp = self.code_object_class().templater.objects( None, None, array_specs=self.arrays, dynamic_array_specs=self.dynamic_arrays, dynamic_array_2d_specs=self.dynamic_arrays_2d, zero_arrays=self.zero_arrays, arange_arrays=arange_arrays, synapses=synapses, clocks=self.clocks, static_array_specs=static_array_specs, networks=networks, code_objects=self.code_objects.values(), get_array_filename=self.get_array_filename, all_codeobj_with_host_rng=self.codeobjects_with_rng["host_api"]["all_runs"], sm_multiplier=sm_multiplier, num_parallel_blocks=num_parallel_blocks, curand_generator_type=curand_generator_type, curand_generator_ordering=curand_generator_ordering, curand_float_type=c_data_type(prefs['core.default_float_dtype']), eventspace_arrays=self.eventspace_arrays, spikegenerator_eventspaces=self.spikegenerator_eventspaces, multisynaptic_idx_vars=multisyn_vars, profiled_codeobjects=self.profiled_codeobjects) # Reinsert deleted entries, in case we use self.arrays later? maybe unnecassary... self.arrays.update(self.eventspace_arrays) writer.write('objects.*', arr_tmp) def generate_main_source(self, writer): main_lines = [] procedures = [('', main_lines)] runfuncs = {} run_counter = 0 for func, args in self.main_queue: if func=='before_run_code_object': codeobj, = args main_lines.append('_before_run_%s();' % codeobj.name) elif func=='run_code_object': codeobj, = args codeobj.runs_every_tick = False # Need to check for RNG functions in code objects only run once (e.g. # when setting group variables before run). The results are stored in # `code_object.rng_calls`, `code_object.poisson_lamdas` and # `code_object.needs_curand_states`. prepare_codeobj_code_for_rng(codeobj) if codeobj.needs_curand_states: self.codeobjects_with_rng["device_api"]["single_tick"].append(codeobj) if isinstance(codeobj.owner, Synapses) \ and codeobj.template_name in ['group_variable_set_conditional', 'group_variable_set']: # At curand state initalization, synapses are not generated yet. 
# For codeobjects run every tick, this happens in the init() of # the random number buffer called at first clock cycle of the network main_lines.append('random_number_buffer.ensure_enough_curand_states();') main_lines.append(f'_run_{codeobj.name}();') elif func == 'after_run_code_object': codeobj, = args main_lines.append(f'_after_run_{codeobj.name}();') elif func=='run_network': net, netcode = args if run_counter == 0: # These lines delete `i`/`j` variables stored per synapse. They need to be called after # synapses_initialise_queue codeobjects, therefore just before the network creation, so I # put them here for synaptic_pre, boolean in self.delete_synaptic_pre.items(): if boolean: code = f''' dev{synaptic_pre}.clear(); dev{synaptic_pre}.shrink_to_fit(); ''' main_lines.extend(stripped_deindented_lines(code)) for synaptic_post, boolean in self.delete_synaptic_post.items(): if boolean: code = f''' dev{synaptic_post}.clear(); dev{synaptic_post}.shrink_to_fit(); ''' main_lines.extend(stripped_deindented_lines(code)) main_lines.extend(netcode) run_counter += 1 elif func=='set_by_constant': arrayname, value, is_dynamic = args size_str = f"{arrayname}.size()" if is_dynamic else f"_num_{arrayname}" rendered_value = CPPNodeRenderer().render_expr(repr(value)) pointer_arrayname = f"dev{arrayname}" if arrayname.endswith('space'): # eventspace pointer_arrayname += f'[current_idx{arrayname}]' if is_dynamic: pointer_arrayname = f"thrust::raw_pointer_cast(&dev{arrayname}[0])" code = f''' for(int i=0; i<{size_str}; i++) {{ {arrayname}[i] = {rendered_value}; }} CUDA_SAFE_CALL( cudaMemcpy( {pointer_arrayname}, &{arrayname}[0], sizeof({arrayname}[0])*{size_str}, cudaMemcpyHostToDevice ) ); ''' main_lines.extend(stripped_deindented_lines(code)) elif func=='set_by_single_value': arrayname, item, value = args pointer_arrayname = f"dev{arrayname}" if arrayname.endswith('space'): # eventspace pointer_arrayname += f'[current_idx{arrayname}]' if arrayname in self.dynamic_arrays.values(): pointer_arrayname = f"thrust::raw_pointer_cast(&dev{arrayname}[0])" code = f''' {arrayname}[{item}] = {value}; CUDA_SAFE_CALL( cudaMemcpy( {pointer_arrayname} + {item}, &{arrayname}[{item}], sizeof({arrayname}[{item}]), cudaMemcpyHostToDevice ) ); ''' main_lines.extend(stripped_deindented_lines(code)) elif func=='set_by_array': arrayname, staticarrayname, is_dynamic = args size_str = "_num_" + arrayname if is_dynamic: size_str = arrayname + ".size()" pointer_arrayname = f"dev{arrayname}" if arrayname in self.dynamic_arrays.values(): pointer_arrayname = f"thrust::raw_pointer_cast(&dev{arrayname}[0])" code = f''' for(int i=0; i<_num_{staticarrayname}; i++) {{ {arrayname}[i] = {staticarrayname}[i]; }} CUDA_SAFE_CALL( cudaMemcpy( {pointer_arrayname}, &{arrayname}[0], sizeof({arrayname}[0])*{size_str}, cudaMemcpyHostToDevice ) ); ''' main_lines.extend(stripped_deindented_lines(code)) elif func=='set_array_by_array': arrayname, staticarrayname_index, staticarrayname_value = args code = f''' for(int i=0; i<_num_{staticarrayname_index}; i++) {{ {arrayname}[{staticarrayname_index}[i]] = {staticarrayname_value}[i]; }} CUDA_SAFE_CALL( cudaMemcpy( dev{arrayname}, &{arrayname}[0], sizeof({arrayname}[0])*_num_{arrayname}, cudaMemcpyHostToDevice ) ); ''' main_lines.extend(stripped_deindented_lines(code)) elif func=='resize_array': array_name, new_size = args code = f''' {array_name}.resize({new_size}); THRUST_CHECK_ERROR(dev{array_name}.resize({new_size})); ''' main_lines.extend(stripped_deindented_lines(code)) elif func=='insert_code': 
main_lines.append(args) elif func=='start_run_func': name, include_in_parent = args if include_in_parent: main_lines.append(f'{name}();') main_lines = [] procedures.append((name, main_lines)) elif func=='end_run_func': name, include_in_parent = args name, main_lines = procedures.pop(-1) runfuncs[name] = main_lines name, main_lines = procedures[-1] elif func=='seed': seed = args if seed is None: # draw random seed in range of possible uint64 numbers seed = np.random.randint(np.iinfo(np.uint64).max, dtype=np.uint64) main_lines.append(f'random_number_buffer.set_seed({seed!r}ULL);') else: raise NotImplementedError("Unknown main queue function type "+func) # Store the GPU ID and it's compute capability. The latter can be overwritten in # self.generate_makefile() via preferences self.gpu_id, self.compute_capability = select_gpu() # generate the finalisations for codeobj in self.code_objects.values(): if hasattr(codeobj.code, 'main_finalise'): main_lines.append(codeobj.code.main_finalise) user_headers = self.headers + prefs['codegen.cpp.headers'] main_tmp = self.code_object_class().templater.main(None, None, gpu_id=self.gpu_id, main_lines=main_lines, code_lines=self.code_lines, code_objects=self.code_objects.values(), report_func=self.report_func, dt=float(defaultclock.dt), user_headers=user_headers, gpu_heap_size=prefs['devices.cuda_standalone.cuda_backend.gpu_heap_size'] ) writer.write('main.cu', main_tmp) def generate_codeobj_source(self, writer): code_object_defs = defaultdict(list) host_parameters = defaultdict(list) kernel_parameters = defaultdict(list) kernel_constants = defaultdict(list) c_float_dtype = c_data_type(prefs['core.default_float_dtype']) c_int_dtype = 'unsigned int' # Generate data for non-constant values for codeobj in self.code_objects.values(): code_object_defs_lines = [] code_object_defs_lines_host_only = [] host_parameters_lines = [] kernel_parameters_lines = [] kernel_constants_lines = [] additional_code = [] number_elements = "" if hasattr(codeobj, 'owner') and hasattr(codeobj.owner, '_N') and codeobj.owner._N != 0: number_elements = str(codeobj.owner._N) else: number_elements = "_N" # We need the functions to be sorted by keys for reproducable rng with a # given seed: For codeobjects that are only run once, we generate the random # numbers using the curand host API. For that, we insert code into the # `additional_code` block in the host function. If we use multiple random # function in one codeobject (e.g. rand() and randn()), the order in which # they are generated can differ between two codeobjects, which makes the # brian2.tests.test_neurongroup.test_random_values_fixed_seed fail. for k, v in sorted(codeobj.variables.items()): if k == 'dt' and prefs['core.default_float_dtype'] == np.float32: # use the double-precision array versions for dt as kernel arguments # they are cast to single-precision scalar dt in scalar_code v = v.real_var # code objects which only run once if k in ["rand", "randn", "poisson"] and codeobj.runs_every_tick == False and codeobj.template_name != "synapses_create_generator": curand_suffix = '' if prefs['core.default_float_dtype'] == np.float64: curand_suffix = 'Double' if k == "randn": num_calls = codeobj.rng_calls["randn"] code = f''' // Genenerate an array of random numbers on the device // Make sure we generate an even number of random numbers int32_t _randn_N = ({number_elements} % 2 == 0) ? 
{number_elements} : {number_elements} + 1; {c_float_dtype}* dev_array_randn; CUDA_SAFE_CALL( cudaMalloc( (void**)&dev_array_randn, sizeof({c_float_dtype})*_randn_N*{num_calls} ) ); CUDA_SAFE_CALL( curandGenerateNormal{curand_suffix}( curand_generator, dev_array_randn, _randn_N*{num_calls}, 0, // mean 1 // stddev ) ); ''' additional_code.append(code) kernel_parameters_lines.append( f"{c_float_dtype}* _ptr_array_{codeobj.name}_randn" ) host_parameters_lines.append("dev_array_randn") elif k == "rand": num_calls = codeobj.rng_calls["rand"] code = f''' // Genenerate an array of random numbers on the device // Make sure we generate an even number of random numbers int32_t _rand_N = ({number_elements} % 2 == 0) ? {number_elements} : {number_elements} + 1; {c_float_dtype}* dev_array_rand; CUDA_SAFE_CALL( cudaMalloc( (void**)&dev_array_rand, sizeof({c_float_dtype})*_rand_N*{num_calls} ) ); CUDA_SAFE_CALL( curandGenerateUniform{curand_suffix}( curand_generator, dev_array_rand, _rand_N*{num_calls} ) ); ''' additional_code.append(code) kernel_parameters_lines.append( f"{c_float_dtype}* _ptr_array_{codeobj.name}_rand" ) host_parameters_lines.append("dev_array_rand") elif k == "poisson": # We are assuming that there can be at most one poisson call per expression, # else brian2 should raise a NotImplementedError due to multiple stateful function calls. assert len(codeobj.poisson_lamdas) < 2, report_issue_message if len(codeobj.poisson_lamdas) == 0: ### On-the-fly poisson number generation (curand device API) # If we have a poisson function call and no entry in # `poisson_lamdas`, we must have a variable lamda and are # using on-the-fly RNG We don't need to add any code, we # will use the device implementation defined in # cuda_generator.py assert codeobj.needs_curand_states, report_issue_message else: # len(codeobj.poisson_lamdas) == 1 ### Pregenerated poisson number (curand host API) # There only one poisson call, hence we have only `poisson_0` poisson_name = 'poisson_0' # curand generates `unsigned int`, we cast it to `int32_t` in our `_poisson` implementation num_calls = codeobj.rng_calls[poisson_name] lamda = codeobj.poisson_lamdas[poisson_name] code = f''' // Genenerate an array of random numbers on the device // Make sure we generate an even number of random numbers int32_t _{poisson_name}_N = ({number_elements} % 2 == 0) ? {number_elements} : {number_elements} + 1; {c_int_dtype}* dev_array_{poisson_name}; CUDA_SAFE_CALL( cudaMalloc( (void**)&dev_array_{poisson_name}, sizeof(unsigned int)*_{poisson_name}_N*{num_calls} ) ); CUDA_SAFE_CALL( curandGeneratePoisson( curand_generator, dev_array_{poisson_name}, _{poisson_name}_N*{num_calls}, {lamda} ) ); ''' additional_code.append(code) kernel_parameters_lines.append( f"{c_int_dtype}* _ptr_array_{codeobj.name}_{poisson_name}" ) host_parameters_lines.append(f"dev_array_{poisson_name}") # Clock variables (t, dt, timestep) elif hasattr(v, 'owner') and isinstance(v.owner, Clock): # Clocks only run on the host and the corresponding device variables are copied # to the device only once in the beginning and in the end of a simulation. # Therefore, we pass clock variables (t, dt, timestep) by value as kernel # parameters whenever they are needed on the device. These values are translated # into pointers in CUDACodeGenerator.determine_keywords(), such that they can be # used in scalar/vector code. 
arrayname = self.get_array_name(v) dtype = c_data_type(v.dtype) host_parameters_lines.append( f"{arrayname}[0]") kernel_parameters_lines.append( f"const {dtype} _value{arrayname}") # ArrayVariables (dynamic and not) elif isinstance(v, ArrayVariable): # These templates run on the host host_codeobjects = ['synapses_create_generator', 'synapses_create_array'] # These templates run on host and device (e.g. synapses_push_spikes # has a before_run codeobject that runs on host only while the run # codeobject runs on the device) host_and_device_codeobjects = ['synapses_push_spikes', 'spatialstateupdate'] prefixes = ['dev'] if codeobj.template_name in host_codeobjects: prefixes = [''] elif codeobj.template_name in host_and_device_codeobjects: # Start with no prefix, such that `_num{array}` variables are # determined from the host vector if array is dynamic prefixes = ['', 'dev'] for n_prefix, prefix in enumerate(prefixes): try: dyn_array_name = self.get_array_name(v, access_data=False, prefix=prefix) array_name = self.get_array_name(v, access_data=True, prefix=prefix) ptr_array_name = self.get_array_name(v, access_data=True, prefix='_ptr') dtype = c_data_type(v.dtype) if isinstance(v, DynamicArrayVariable): if v.ndim == 1: code_object_defs_lines.append( f'{dtype}* const {array_name} = thrust::raw_pointer_cast(&{dyn_array_name}[0]);' ) # Add host and kernel parameters only for device pointers if prefix == 'dev': # These lines are used to define the kernel call parameters, that # means only for codeobjects running on the device. The array names # always have a `_dev` prefix. host_parameters_lines.append(f"{array_name}") # These lines declare kernel parameters as the `_ptr` variables that # are used in `scalar_code` and `vector_code`. # TODO: here we should add const / __restrict and other optimizations # for variables that are e.g. only read in the kernel kernel_parameters_lines.append(f"{dtype}* {ptr_array_name}") # Add size variables `_num{array}` only once and if # there are two prefixes, base it on host array # `{array}.size()` if len(prefixes) == 1 or prefix == '': code_object_defs_lines.append( f'const int _num{k} = {dyn_array_name}.size();' ) host_parameters_lines.append(f"_num{k}") kernel_parameters_lines.append(f"const int _num{k}") else: # v is ArrayVariable but not DynamicArrayVariable # Add host and kernel parameters only for device pointers if prefix == 'dev': idx = '' if k.endswith('space'): bare_array_name = self.get_array_name(v) idx = f'[current_idx{bare_array_name}]' host_parameters_lines.append(f"{array_name}{idx}") kernel_parameters_lines.append(f'{dtype}* {ptr_array_name}') # Add size variables `_num{array}` only once if n_prefix == 0: code_object_defs_lines.append(f'const int _num{k} = {v.size};') kernel_constants_lines.append(f'const int _num{k} = {v.size};') except TypeError: pass # This rand stuff got a little messy... we pass a device pointer as kernel variable and have a hash define for rand() -> _ptr_..._rand[] # The device pointer is advanced every clock cycle in rand.cu and reset when the random number buffer is refilled (also in rand.cu) # TODO can we just include this in the k == 'rand' test above? 
# RAND if codeobj.rng_calls["rand"] >= 1 and codeobj.runs_every_tick: host_parameters_lines.append(f"dev_{codeobj.name}_rand") kernel_parameters_lines.append( f"{c_float_dtype}* _ptr_array_{codeobj.name}_rand" ) # RANDN if codeobj.rng_calls["randn"] >= 1 and codeobj.runs_every_tick: host_parameters_lines.append(f"dev_{codeobj.name}_randn") kernel_parameters_lines.append( f"{c_float_dtype}* _ptr_array_{codeobj.name}_randn" ) # POISSON (with scalar lamda) # Here, we don't use the hash define as for rand/n, instead we pass the # kernel paramter (_ptr...) directly to the _poisson function which returns # the correct element # TODO: We could do the same for rand/n and get rid of the hash define hack for rng_type in codeobj.rng_calls.keys(): if rng_type not in ["rand", "randn"] and codeobj.runs_every_tick: assert rng_type.startswith("poisson") if codeobj.rng_calls[rng_type] >= 1: host_parameters_lines.append( f"dev_{codeobj.name}_{rng_type}" ) kernel_parameters_lines.append( f"{c_int_dtype}* _ptr_array_{codeobj.name}_{rng_type}" ) # Sometimes an array is referred to by to different keys in our # dictionary -- make sure to never add a line twice for line in code_object_defs_lines: if not line in code_object_defs[codeobj.name]: code_object_defs[codeobj.name].append(line) for line in host_parameters_lines: if not line in host_parameters[codeobj.name]: host_parameters[codeobj.name].append(line) for line in kernel_parameters_lines: if not line in kernel_parameters[codeobj.name]: kernel_parameters[codeobj.name].append(line) for line in chain(kernel_constants_lines): if not line in kernel_constants[codeobj.name]: kernel_constants[codeobj.name].append(line) for line in additional_code: if not line in code_object_defs[codeobj.name]: code_object_defs[codeobj.name].append(line) # Generate the code objects for codeobj in self.code_objects.values(): ns = codeobj.variables def _replace_constants_and_parameters(code): # HOST_CONSTANTS are equivalent to C++ Standalone's CONSTANTS code = code.replace('%HOST_CONSTANTS%', '\n\t\t'.join(code_object_defs[codeobj.name])) # KERNEL_CONSTANTS are the same for inside device kernels code = code.replace('%KERNEL_CONSTANTS%', '\n\t'.join(kernel_constants[codeobj.name])) # HOST_PARAMETERS are parameters that device kernels are called with from host code code = code.replace('%HOST_PARAMETERS%', ',\n\t\t\t'.join(host_parameters[codeobj.name])) # KERNEL_PARAMETERS are the same names of the same parameters inside the device kernels code = code.replace('%KERNEL_PARAMETERS%', ',\n\t'.join(kernel_parameters[codeobj.name])) code = code.replace('%CODEOBJ_NAME%', codeobj.name) return code # Before/after run code for block in codeobj.before_after_blocks: cu_code = getattr(codeobj.code, block + '_cu_file') cu_code = self.freeze(cu_code, ns) cu_code = _replace_constants_and_parameters(cu_code) h_code = getattr(codeobj.code, block + '_h_file') writer.write('code_objects/' + block + '_' + codeobj.name + '.cu', cu_code) writer.write('code_objects/' + block + '_' + codeobj.name + '.h', h_code) # Main code # TODO: fix these freeze/HOST_CONSTANTS hacks somehow - they work but not elegant. 
code = self.freeze(codeobj.code.cu_file, ns) if len(host_parameters[codeobj.name]) == 0: host_parameters[codeobj.name].append("0") kernel_parameters[codeobj.name].append("int dummy") code = _replace_constants_and_parameters(code) # substitue in generated code double types with float types in # single-precision mode if prefs['core.default_float_dtype'] == np.float32: # cast time differences (double) to float in event-drive updates sub = 't - lastupdate' if sub in code: code = code.replace(sub, f'float({sub})') logger.debug(f"Replaced {sub} with float({sub}) in {codeobj.name}") # replace double-precision floating-point literals with their # single-precision version (e.g. `1.0` -> `1.0f`) code = replace_floating_point_literals(code) logger.debug( f"Replaced floating point literals by single precision version " f"(appending `f`) in {codeobj.name}." ) writer.write('code_objects/'+codeobj.name+'.cu', code) writer.write('code_objects/'+codeobj.name+'.h', codeobj.code.h_file) def generate_rand_source(self, writer): # Device side binomial functions and poisson functions with variables lambda use # the curand device api. The curand states (one per thread executed in parallel) # are initialized in rand.cu, where as many curand states are initialized as the # size of the largest codeobject with curand device api calls. needed_number_curand_states = {} for co in (self.codeobjects_with_rng["device_api"]["every_tick"] + self.codeobjects_with_rng["device_api"]["single_tick"]): co_name = co.owner.name if co_name not in needed_number_curand_states: if isinstance(co.owner, Synapses): # this is the pointer to the synapse object's N, which is a # null pointer before synapses are generated and an int ptr # after synapse generation (used to test if synapses # already generated or not) N_ptr = f'_array_{co_name}_N' N_value = N_ptr + '[0]' else: N_ptr = None N_value = co.owner._N needed_number_curand_states[co_name] = (N_ptr, N_value) rand_tmp = self.code_object_class().templater.rand(None, None, codeobjects_with_rng_per_run=self.codeobjects_with_rng["host_api"]["per_run"], all_poisson_lamdas=self.all_poisson_lamdas, needed_number_curand_states=needed_number_curand_states, number_run_calls=len(self.codeobjects_with_rng["host_api"]["per_run"]), profiled=self.enable_profiling, curand_float_type=c_data_type(prefs['core.default_float_dtype'])) writer.write('rand.*', rand_tmp) def copy_source_files(self, writer, directory): # Copy the brianlibdirectory brianlib_dir = os.path.join(os.path.split(inspect.getsourcefile(self.code_object_class()))[0], 'brianlib') brianlib_files = copy_directory(brianlib_dir, os.path.join(directory, 'brianlib')) for file in brianlib_files: if file.lower().endswith('.cpp'): writer.source_files.add('brianlib/'+file) if file.lower().endswith('.cu'): writer.source_files.add('brianlib/'+file) elif file.lower().endswith('.h'): writer.header_files.add('brianlib/'+file) def generate_network_source(self, writer): maximum_run_time = self._maximum_run_time if maximum_run_time is not None: maximum_run_time = float(maximum_run_time) network_tmp = self.code_object_class().templater.network(None, None, maximum_run_time=maximum_run_time, eventspace_arrays=self.eventspace_arrays, spikegenerator_eventspaces=self.spikegenerator_eventspaces) writer.write('network.*', network_tmp) def generate_synapses_classes_source(self, writer): synapses_classes_tmp = self.code_object_class().templater.synapses_classes(None, None) writer.write('synapses_classes.*', synapses_classes_tmp) def generate_run_source(self, writer): 
run_tmp = self.code_object_class().templater.run(None, None, run_funcs=self.runfuncs, code_objects=self.code_objects.values(), user_headers=self.headers, array_specs=self.arrays, clocks=self.clocks) writer.write('run.*', run_tmp) def generate_makefile(self, writer, cpp_compiler, cpp_compiler_flags, cpp_linker_flags, debug, disable_asserts): available_gpu_arch_flags = ( '--gpu-architecture', '-arch', '--gpu-code', '-code', '--generate-code', '-gencode' ) nvcc_compiler_flags = prefs.devices.cuda_standalone.cuda_backend.extra_compile_args_nvcc gpu_arch_flags = [] for flag in nvcc_compiler_flags: if flag.startswith(available_gpu_arch_flags): gpu_arch_flags.append(flag) nvcc_compiler_flags.remove(flag) elif flag.startswith(('-w', '--disable-warnings')): # add the flage to linker flags, else linking will give warnings cpp_linker_flags.append(flag) # Make the linker options (meant to be passed to `gcc`) compatible with `nvcc` for i, flag in enumerate(cpp_linker_flags): if flag.startswith('-Wl,'): # -Wl,<option> passes <option> directly to linker # for gcc `-Wl,<option>`, for nvcc `-Xlinker "<option>"` cpp_linker_flags[i] = flag.replace('-Wl,', '-Xlinker ') # Check if compute capability was set manually via preference compute_capability_pref = prefs.devices.cuda_standalone.cuda_backend.compute_capability # If GPU architecture was set via `extra_compile_args_nvcc` and # `compute_capability`, ignore `compute_capability` if gpu_arch_flags and compute_capability_pref is not None: logger.warn( "GPU architecture for compilation was specified via " "`prefs.devices.cuda_standalone.cuda_backend.compute_capability` and " "`prefs.devices.cuda_standalone.cuda_backend.extra_compile_args_nvcc`. " "`prefs.devices.cuda_standalone.cuda_backend.compute_capability` will be ignored. " "To get rid of this warning, set " "`prefs.devices.cuda_standalone.brian_backend.compute_capability` to it's default " "value `None`" ) # Ignore compute capability of chosen GPU and the one manually set via # `compute_capability` preferences. self.compute_capability = None # If GPU architecture was set only via `extra_compile_args_nvcc`, use that elif gpu_arch_flags: # Ignore compute capability of chosen GPU self.compute_capability = None # If GPU architecture was set only via `compute_capability` prefs, use that elif compute_capability_pref is not None: self.compute_capability = compute_capability_pref # If compute_capability wasn't set manually, the one from the chosen GPU is used # (stored in self.compute_capability, see self.generate_main_source()) if self.compute_capability is not None: # check if compute capability is supported if self.compute_capability < self.minimal_compute_capability: raise NotImplementedError( f"Compute capability `{self.compute_capability}` is not supported. " f"Minimal supported compute capability is " f"`{self.minimal_compute_capability}`." ) # If GPU architecture is detected automatically or set via `compute_capability` # prefs, we still need to add it as a compile argument if not gpu_arch_flags: # Turn float (3.5) into string ("35") compute_capability_str = ''.join(str(self.compute_capability).split('.')) gpu_arch_flags.append(f"-arch=sm_{compute_capability_str}") # Log compiled GPU architecture if self.compute_capability is None: logger.info( f"Compiling device code with manually set architecture flags " f"({gpu_arch_flags}). 
Be aware that the minimal supported compute " f"capability is {self.minimal_compute_capability} " "(we are not checking your compile flags)" ) else: logger.info( f"Compiling device code for compute capability " f"{self.compute_capability} (compiler flags: {gpu_arch_flags})" ) nvcc_path = get_nvcc_path() if cpp_compiler=='msvc': # Check CPPStandaloneDevice.generate_makefile() for how to do things raise RuntimeError("Windows is currently not supported. See https://github.com/brian-team/brian2cuda/issues/225") else: # Generate the makefile if os.name=='nt': rm_cmd = 'del *.o /s\n\tdel main.exe $(DEPS)' else: rm_cmd = 'rm $(OBJS) $(PROGRAM) $(DEPS)' if debug: compiler_debug_flags = '-g -DDEBUG -G -DTHRUST_DEBUG' linker_debug_flags = '-g -G' else: compiler_debug_flags = '' linker_debug_flags = '' if disable_asserts: # NDEBUG precompiler macro disables asserts (both for C++ and CUDA) nvcc_compiler_flags += ['-NDEBUG'] makefile_tmp = self.code_object_class().templater.makefile( None, None, source_files=' '.join(sorted(writer.source_files)), header_files=' '.join(sorted(writer.header_files)), cpp_compiler_flags=' '.join(cpp_compiler_flags), compiler_debug_flags=compiler_debug_flags, linker_debug_flags=linker_debug_flags, cpp_linker_flags=' '.join(cpp_linker_flags), nvcc_compiler_flags=' '.join(nvcc_compiler_flags), gpu_arch_flags=' '.join(gpu_arch_flags), nvcc_path=nvcc_path, rm_cmd=rm_cmd, ) writer.write('makefile', makefile_tmp) def build(self, directory='output', compile=True, run=True, debug=False, clean=False, with_output=True, disable_asserts=False, additional_source_files=None, run_args=None, direct_call=True, **kwds): ''' Build the project TODO: more details Parameters ---------- directory : str, optional The output directory to write the project to, any existing files will be overwritten. If the given directory name is ``None``, then a temporary directory will be used (used in the test suite to avoid problems when running several tests in parallel). Defaults to ``'output'``. compile : bool, optional Whether or not to attempt to compile the project. Defaults to ``True``. run : bool, optional Whether or not to attempt to run the built project if it successfully builds. Defaults to ``True``. debug : bool, optional Whether to compile in debug mode. Defaults to ``False``. with_output : bool, optional Whether or not to show the ``stdout`` of the built program when run. Output will be shown in case of compilation or runtime error. Defaults to ``True``. clean : bool, optional Whether or not to clean the project before building. Defaults to ``False``. additional_source_files : list of str, optional A list of additional ``.cu`` files to include in the build. direct_call : bool, optional Whether this function was called directly. Is used internally to distinguish an automatic build due to the ``build_on_run`` option from a manual ``device.build`` call. ''' if self.build_on_run and direct_call: raise RuntimeError('You used set_device with build_on_run=True ' '(the default option), which will automatically ' 'build the simulation at the first encountered ' 'run call - do not call device.build manually ' 'in this case. If you want to call it manually, ' 'e.g. because you have multiple run calls, use ' 'set_device with build_on_run=False.') if self.has_been_run: raise RuntimeError('The network has already been built and run ' 'before. To build several simulations in ' 'the same script, call "device.reinit()" ' 'and "device.activate()". Note that you ' 'will have to set build options (e.g. 
the ' 'directory) and defaultclock.dt again.') # TODO: remove this when #83 is fixed if not self.build_on_run: run_count = 0 for func, args in self.main_queue: if func == 'run_network': run_count += 1 if run_count > 1: logger.warn("Multiple run statements are currently error prone. " "See #83, #85, #86.") renames = {'project_dir': 'directory', 'compile_project': 'compile', 'run_project': 'run'} if len(kwds): msg = '' for kwd in kwds: if kwd in renames: msg += ("Keyword argument '%s' has been renamed to " "'%s'. ") % (kwd, renames[kwd]) else: msg += f"Unknown keyword argument '{kwd}'. " raise TypeError(msg) if debug and disable_asserts: logger.warn("You have disabled asserts in debug mode. Are you sure this is what you wanted to do?") if additional_source_files is None: additional_source_files = [] if run_args is None: run_args = [] if directory is None: directory = tempfile.mkdtemp(prefix='brian_standalone_') self.project_dir = directory ensure_directory(directory) # Determine compiler flags and directories cpp_compiler, cpp_default_extra_compile_args = get_compiler_and_args() extra_compile_args = self.extra_compile_args + cpp_default_extra_compile_args extra_link_args = self.extra_link_args + prefs['codegen.cpp.extra_link_args'] codeobj_define_macros = [macro for codeobj in self.code_objects.values() for macro in codeobj.compiler_kwds.get('define_macros', [])] define_macros = (self.define_macros + prefs['codegen.cpp.define_macros'] + codeobj_define_macros) codeobj_include_dirs = [include_dir for codeobj in self.code_objects.values() for include_dir in codeobj.compiler_kwds.get('include_dirs', [])] include_dirs = (self.include_dirs + prefs['codegen.cpp.include_dirs'] + codeobj_include_dirs) codeobj_library_dirs = [library_dir for codeobj in self.code_objects.values() for library_dir in codeobj.compiler_kwds.get('library_dirs', [])] library_dirs = (self.library_dirs + prefs['codegen.cpp.library_dirs'] + codeobj_library_dirs) codeobj_runtime_dirs = [runtime_dir for codeobj in self.code_objects.values() for runtime_dir in codeobj.compiler_kwds.get('runtime_library_dirs', [])] runtime_library_dirs = (self.runtime_library_dirs + prefs['codegen.cpp.runtime_library_dirs'] + codeobj_runtime_dirs) codeobj_libraries = [library for codeobj in self.code_objects.values() for library in codeobj.compiler_kwds.get('libraries', [])] libraries = (self.libraries + prefs['codegen.cpp.libraries'] + codeobj_libraries) cpp_compiler_obj = ccompiler.new_compiler(compiler=cpp_compiler) cpp_compiler_flags = (ccompiler.gen_preprocess_options(define_macros, include_dirs) + extra_compile_args) cpp_linker_flags = (ccompiler.gen_lib_options(cpp_compiler_obj, library_dirs=library_dirs, runtime_library_dirs=runtime_library_dirs, libraries=libraries) + extra_link_args) codeobj_source_files = [source_file for codeobj in self.code_objects.values() for source_file in codeobj.compiler_kwds.get('sources', [])] additional_source_files += codeobj_source_files for d in ['code_objects', 'results', 'static_arrays']: ensure_directory(os.path.join(directory, d)) self.writer = CUDAWriter(directory) logger.diagnostic("Writing CUDA standalone project to directory "+os.path.normpath(directory)) self.write_static_arrays(directory) # Check that all names are globally unique names = [obj.name for net in self.networks for obj in net.objects] non_unique_names = [name for name, count in Counter(names).items() if count > 1] if len(non_unique_names): formatted_names = ', '.join(f"'{name}'" for name in non_unique_names) raise ValueError('All 
objects need to have unique names in ' 'standalone mode, the following name(s) were used ' 'more than once: %s' % formatted_names) self.generate_main_source(self.writer) # Create lists of codobjects using rand, randn, poisson or binomial across all # runs (needed for variable declarations). # - Variables needed for device side rand/randn/poisson are declared in objects.cu: # codeobjects_with_rng["host_api"]["all_runs"]['rand'/'rand'/'poisson'] are needed in `generate_objects_source` # - Variables needed for device side binomial functions are initialized in rand.cu: # codeobjects_with_rng["device_api"]["every_tick"] is needed in `generate_rand_source` for run_codeobj in self.codeobjects_with_rng["host_api"]["per_run"]: for key in run_codeobj.keys(): # keys: 'rand', 'randn', 'poisson-<idx>' self.codeobjects_with_rng["host_api"]["all_runs"][key].extend(run_codeobj[key]) self.generate_codeobj_source(self.writer) net_synapses = [s for net in self.networks for s in net.objects if isinstance(s, Synapses)] self.generate_objects_source(self.writer, self.arange_arrays, net_synapses, self.static_array_specs, self.networks) self.generate_network_source(self.writer) self.generate_synapses_classes_source(self.writer) self.generate_run_source(self.writer) self.generate_rand_source(self.writer) self.copy_source_files(self.writer, directory) self.writer.source_files.update(additional_source_files) self.generate_makefile(self.writer, cpp_compiler, cpp_compiler_flags, cpp_linker_flags, debug, disable_asserts) # Not sure what the best place is to call Network.after_run -- at the # moment the only important thing it does is to clear the objects stored # in magic_network. If this is not done, this might lead to problems # for repeated runs of standalone (e.g. in the test suite). for net in self.networks: net.after_run() logger.info("Using the following preferences for CUDA standalone:") for pref_name in prefs: if "devices.cuda_standalone" in pref_name: logger.info(f"\t{pref_name} = {prefs[pref_name]}") logger.debug("Using the following brian preferences:") for pref_name in prefs: if pref_name not in prefs: logger.debug(f"\t{pref_name} = {prefs[pref_name]}") if compile: self.compile_source(directory, cpp_compiler, debug, clean) if run: self.run(directory, with_output, run_args) def network_run(self, net, duration, report=None, report_period=10*second, namespace=None, profile=False, level=0, **kwds): ################################################### ### This part is copied from CPPStandaoneDevice ### ################################################### self.networks.add(net) if kwds: logger.warn(('Unsupported keyword argument(s) provided for run: ' '%s') % ', '.join(kwds.keys())) # We store this as an instance variable for later access by the # `code_object` method self.enable_profiling = profile # Allow setting `profile` in the `set_device` call (used e.g. 
in brian2cuda # SpeedTest configurations) if 'profile' in self.build_options: build_profile = self.build_options.pop('profile') if build_profile: self.enable_profiling = True all_objects = net.sorted_objects net._clocks = {obj.clock for obj in all_objects} t_end = net.t+duration for clock in net._clocks: clock.set_interval(net.t, t_end) # Get the local namespace if namespace is None: namespace = get_local_namespace(level=level+2) net.before_run(namespace) self.clocks.update(net._clocks) net.t_ = float(t_end) # TODO: remove this horrible hack for clock in self.clocks: if clock.name=='clock': clock._name = '_clock' # Extract all the CodeObjects # Note that since we ran the Network object, these CodeObjects will be sorted into the right # running order, assuming that there is only one clock code_objects = [] for obj in all_objects: if obj.active: for codeobj in obj._code_objects: code_objects.append((obj.clock, codeobj)) # Code for a progress reporting function standard_code = ''' std::string _format_time(float time_in_s) { float divisors[] = {24*60*60, 60*60, 60, 1}; char letters[] = {'d', 'h', 'm', 's'}; float remaining = time_in_s; std::string text = ""; int time_to_represent; for (int i =0; i < sizeof(divisors)/sizeof(float); i++) { time_to_represent = int(remaining / divisors[i]); remaining -= time_to_represent * divisors[i]; if (time_to_represent > 0 || text.length()) { if(text.length() > 0) { text += " "; } text += (std::to_string(time_to_represent)+letters[i]); } } //less than one second if(text.length() == 0) { text = "< 1s"; } return text; } void report_progress(const double elapsed, const double completed, const double start, const double duration) { if (completed == 0.0) { %STREAMNAME% << "Starting simulation at t=" << start << " s for duration " << duration << " s"; } else { %STREAMNAME% << completed*duration << " s (" << (int)(completed*100.) << "%) simulated in " << _format_time(elapsed); if (completed < 1.0) { const int remaining = (int)((1-completed)/completed*elapsed+0.5); %STREAMNAME% << ", estimated " << _format_time(remaining) << " remaining."; } } %STREAMNAME% << std::endl << std::flush; } ''' if report is None: report_func = '' elif report == 'text' or report == 'stdout': report_func = standard_code.replace('%STREAMNAME%', 'std::cout') elif report == 'stderr': report_func = standard_code.replace('%STREAMNAME%', 'std::cerr') elif isinstance(report, str): report_func = ''' void report_progress(const double elapsed, const double completed, const double start, const double duration) { %REPORT% } '''.replace('%REPORT%', report) else: raise TypeError(('report argument has to be either "text", ' '"stdout", "stderr", or the code for a report ' 'function')) if report_func != '': if self.report_func != '' and report_func != self.report_func: raise NotImplementedError('The CUDA standalone device does not ' 'support multiple report functions, ' 'each run has to use the same (or ' 'none).') self.report_func = report_func if report is not None: report_call = 'report_progress' else: report_call = 'NULL' ############################################################## ### From here on the code differs from CPPStandaloneDevice ### ############################################################## # For each codeobject of this run check if it uses rand, randn, poisson or # binomials. Store these as attributes of the codeobject and create # lists of codeobjects that use rand, randn, poisson or binomials. 
This only # checks codeobject in the network, meaning only the ones running every # clock cycle. # self.codeobjects_with_rng["host_api"]["per_run"] is a list (one per run) of defaultdicts # with keys 'rand', 'randn', 'poisson_<idx>' and values being lists of # codeobjects. self.codeobjects_with_rng["host_api"]["per_run"].append(defaultdict(list)) run_idx = -1 # last list index # Count random number ocurrences in codeobjects run every tick for _, co in code_objects: # (clock, code_object) prepare_codeobj_code_for_rng(co) if co.rng_calls["rand"] > 0: self.codeobjects_with_rng["host_api"]["per_run"][run_idx]['rand'].append(co) if co.rng_calls["randn"] > 0: self.codeobjects_with_rng["host_api"]["per_run"][run_idx]['randn'].append(co) for poisson_name, lamda in co.poisson_lamdas.items(): self.all_poisson_lamdas[co.name][poisson_name] = lamda self.codeobjects_with_rng["host_api"]["per_run"][run_idx][poisson_name].append(co) if co.needs_curand_states: if co not in self.codeobjects_with_rng["device_api"]["every_tick"]: self.codeobjects_with_rng["device_api"]["every_tick"].append(co) # To profile SpeedTests, we need to be able to set `profile` in # `set_device`. Here we catch that case. if 'profile' in self.build_options: build_profile = self.build_options.pop('profile') if build_profile: self.enable_profiling = True # Generate the updaters run_lines = [] run_lines.append(f'{net.name}.clear();') # create all random numbers needed for the next clock cycle for clock in net._clocks: run_lines.append(f'{net.name}.add(&{clock.name}, _run_random_number_buffer);') all_clocks = set() for clock, codeobj in code_objects: run_lines.append(f'{net.name}.add(&{clock.name}, _run_{codeobj.name});') all_clocks.add(clock) # Under some rare circumstances (e.g. a NeuronGroup only defining a # subexpression that is used by other groups (via linking, or recorded # by a StateMonitor) *and* not calculating anything itself *and* using a # different clock than all other objects) a clock that is not used by # any code object should nevertheless advance during the run. We include # such clocks without a code function in the network. for clock in net._clocks: if clock not in all_clocks: run_lines.append(f'{net.name}.add(&{clock.name}, NULL);') # In our benchmark scripts we run one example `nvprof` run, # which is informative especially when not running in profile # mode. In order to have the `nvprof` call only profile the # kernels which are run every timestep, we add # `cudaProfilerStart()`, `cudaDeviceSynchronize()` and # `cudaProfilerStop()`. But this might be confusing for anybody # who runs `nvprof` on their generated code, since it will not # report any profiling info about kernels, that initialise # things only once in the beginning? Maybe get rid of it in a # release version? 
(TODO: add via insert_code mechanism) run_lines.append('CUDA_SAFE_CALL(cudaProfilerStart());') run_lines.extend(self.code_lines['before_run']) # run everything that is run on a clock run_lines.append( f'{net.name}.run({float(duration)!r}, {report_call}, {float(report_period)!r});' ) run_lines.extend(self.code_lines['after_run']) # for multiple runs, the random number buffer needs to be reset run_lines.append('random_number_buffer.run_finished();') # nvprof stuff run_lines.append('CUDA_SAFE_CALL(cudaDeviceSynchronize());') run_lines.append('CUDA_SAFE_CALL(cudaProfilerStop());') self.main_queue.append(('run_network', (net, run_lines))) # Manually set the cache for the clocks, simulation scripts might # want to access the time (which has been set in code and is therefore # not accessible by the normal means until the code has been built and # run) for clock in net._clocks: self.array_cache[clock.variables['timestep']] = np.array([clock._i_end]) self.array_cache[clock.variables['t']] = np.array([clock._i_end * clock.dt_]) # Initialize eventspaces with -1 before the network runs for codeobj in self.code_objects.values(): if codeobj.template_name == "threshold" or codeobj.template_name == "spikegenerator": for key in codeobj.variables.keys(): if key.endswith('space'): # get the correct eventspace name eventspace_name = self.get_array_name(codeobj.variables[key], False) # In case of custom scheduling, the thresholder might come after synapses or monitors # and needs to be initialized in the beginning of the simulation # See generate_main_source() for main_queue formats # Initialize entire eventspace array with -1 at beginning of main self.main_queue.insert( 0, # list insert position # func , (arrayname, value, is_dynamic) ('set_by_constant', (eventspace_name, -1, False)) ) # Set the last value (index N) in the eventspace array to 0 (-> event counter) self.main_queue.insert( 1, # list insert position ( 'set_by_single_value', # func # arrayname , item, , value (eventspace_name, f"_num_{eventspace_name} - 1", 0) ) ) if self.build_on_run: if self.has_been_run: raise RuntimeError('The network has already been built and run ' 'before. Use set_device with ' 'build_on_run=False and an explicit ' 'device.build call to use multiple run ' 'statements with this device.') self.build(direct_call=False, **self.build_options) self.first_run = False def network_store(self, net, *args, **kwds): raise NotImplementedError(('The store/restore mechanism is not ' 'supported in CUDA standalone')) def network_restore(self, net, *args, **kwds): raise NotImplementedError(('The store/restore mechanism is not ' 'supported in CUDA standalone')) def prepare_codeobj_code_for_rng(codeobj): ''' Prepare a CodeObject for random number generation (RNG). There are two different ways that random numbers are generated in CUDA: 1) Using a buffer system which is refilled from host code in regular intervals using the cuRAND host API. This is used for `rand()`, `randn()` and `poisson(lambda)` when `lambda` is a scalar. The buffer system is implemented in the `rand.cu` template. 2) Using on-the-fly RNG from device code using the cuRAND device API. This is used for `binomial` and `poisson(lambda)` when `lambda` is a vectorized variable (different across neurons/synapses). This needs initilization of cuRAND random states, which is also happening in the `rand.cu` template. 
This function counts the number of `rand()`, `randn()` and `poisson(<lambda>)` appearances in `codeobj.code.cu_file` and stores this number in the `codeobj.rng_calls` dictionary (with keys `"rand"`, `"randn"` and `"poisson_<idx>"` ,one <idx> per `poisson()` call). If the codeobject uses the curand device API for RNG (for binomial of poisson with variable lambda), this function sets `codeobj.needs_curand_states = True`. For RNG functions that use the buffer system, this function replaces the function arguments in the generated code such that a pointer to the random number buffer and the correct index are passed as function arguments. For RNG functions that use on-the-fly RNG, the functions are not replaced since no pointer or index has to be passed. For the `poisson` RNG, the RNG type depends on the `lambda` value. For scalar `lambda`, we use the buffer system which is most efficient and most robust in the RNG. For vectorized `lambda` values, the host API is inefficient and instead the simple device API is used, which is the most efficient but least robust. For the two RNG systems to work, we overload the CUDA implementation of `_poisson` with `_poisson(double _lambda, ...)` and `_poisson(unsigned int* _poisson_buffer, ...)`. When the buffer system is used, we replace the `_poisson(<lambda>, ...)` calls with `_poisson(<int_pointer>, ...)` calls. For `poisson` with `lambda <= 0`, the returned random numbers are always `0`. This function makes sure that the `lambda` is replaced with a double literal for our overloaded `_poisson` function to work correctly. Parameters ---------- codeobj: CodeObjects Codeobject with generated CUDA code in `codeobj.code.cu_file`. ''' # synapses_create_generator uses host side random number generation if codeobj.template_name == 'synapses_create_generator': return ### RAND/N REGEX # regex explained # (?<!...) negative lookbehind: don't match if ... preceeds # XXX: This only excludes #define lines which have exactly one space to '_rand'. # As long is we don't have '#define _rand' with 2 spaces, thats fine. # \b - non-alphanumeric character (word boundary, does not consume a character) pattern_template = r'(?<!#define )\b_{rng_func}\(_vectorisation_idx\)' rand_randn_pattern = { 'rand': pattern_template.format(rng_func='rand'), 'randn': pattern_template.format(rng_func='randn'), } # Store number of matches in codeobj.rng_calls dictionary for rng_type in ['rand', 'randn']: matches = re.findall(rand_randn_pattern[rng_type], codeobj.code.cu_file) num_calls = len(matches) codeobj.rng_calls[rng_type] = num_calls logger.diagnostic( f"Matched {num_calls} {rng_type} calls for {codeobj.name}" ) ### POISSON REGEX # (?P<lambda>*?) Named group: Returns whatever is matched inside the brackets # instead of the entire string and stores it in the variable # `lamda`. # `.*?` does a non-greedy match of all (*). This is the lambda value. poisson_pattern = r'(?<!#define )\b_poisson\((?P<lamda>.*?), _vectorisation_idx\)' matches_poisson = re.findall(poisson_pattern, codeobj.code.cu_file) # Collect the number of poisson calls separated by lambda values (we need to # generate poisson values separately for each lambda value when using cuRAND host # API). We call the poisson functions with different lambda `poisson-<idx>`, where # `<idx>` is enuemerates all poisson functions. It looks like this: # # codeobj.rng_calls.keys() = ["poisson_0", "poisson_1", ...] 
# codeobj.rng_calls["poisson_0"] = <number_of_calls_per_time_step> # # The lamda values for all poisson functions are stored in # device.all_poisson_lamdas[codeobj.name]["poisson_0"] = <lambda_value> lamda_matches = {} poisson_device_api = False poisson_with_lamda_zero = [] for i, lamda_match in enumerate(sorted(set(matches_poisson))): poisson_name = f"poisson_{i}" # Test if the lambda_match from poisson(<lambda_match>) is scalar or vectorized # (different across neurons/synapses of the codeobject owner) try: # Try to convert it to float, will raise ValueError if not possible # This will work only if `lambda_match` is a literal, e.g. in `poisson(5)` lamda = float(lamda_match) lamda_is_scalar = True logger.debug( f"Matched literal scalar lambda {lamda} for {poisson_name} in {codeobj.name}" ) except ValueError: # lamda is not a float but a variable, e.g. `poisson(var)` # Check if lamda is scalar and constant (i.e. doesn't change during a run) # TODO: check if scalar variable can be set during in run. If so, check for # constant here as well! # TODO: make sure that a scalar and constand variable can't be changed # during a run! if lamda_match in codeobj.variables and codeobj.variables[lamda_match].scalar: lamda = codeobj.variables[lamda_match].value const = codeobj.variables[lamda_match].constant lamda_is_scalar = True logger.debug( f"Matched non-literal scalar lambda {lamda} for {poisson_name} in {codeobj.name}" ) else: # lamda is an array variable lamda = lamda_match lamda_is_scalar = False logger.debug( f"Matched vectorized lambda {lamda} in {codeobj.name}" ) if lamda_is_scalar: # We don't want to generate random numbers on the host for lambda <= 0, # which should return 0 anyways. We use the _poisson(double, ...) function, # which checks for lambda <= 0 before calling the curand devica API. Hence # we don't need host side RNG or curand states. lamda_matches[poisson_name] = lamda_match if lamda <= 0: # We need to replace '0' with '0.0' (double literal) for lambda in this # case, see comment below poisson_with_lamda_zero.append(poisson_name) continue assert lamda not in codeobj.poisson_lamdas.values() assert poisson_name not in codeobj.poisson_lamdas.keys() codeobj.poisson_lamdas[poisson_name] = lamda codeobj.rng_calls[poisson_name] = matches_poisson.count(lamda_match) else: codeobj.needs_curand_states = True poisson_device_api = True # We have two if/else code paths in synapses code (homog. / heterog. delay mode), # therefore we have twice as much matches for rand/randn/poisson-<idx> as actual # calls. Hence we half the number of detected calls here. 
if codeobj.template_name == 'synapses': for rng_type in codeobj.rng_calls.keys(): assert codeobj.rng_calls[rng_type] % 2 == 0 codeobj.rng_calls[rng_type] //= 2 # RAND/N # Substitue the _vectorisation_idx of _rand/n calls such that different calls always # get different random numbers from the random number buffers # Substitute rand/n arguments twice for synapses templates repeat = 2 if codeobj.template_name == 'synapses' else 1 for rng_type in ["rand", "randn"]: if codeobj.rng_calls[rng_type] > 0: for _ in range(repeat): for i in range(codeobj.rng_calls[rng_type]): codeobj.code.cu_file = re.sub( rand_randn_pattern[rng_type], f"_{rng_type}(_vectorisation_idx + {i} * _N)", codeobj.code.cu_file, count=1 ) # POISSON sub_repl_template = ( "_poisson(_ptr_array_{codeobj.name}_{poisson_name}, _vectorisation_idx + {i} * _N)" ) for poisson_name, lamda_match in lamda_matches.items(): sub_pattern = poisson_pattern.replace( # use the correct lamda instead of matching the lamda "(?P<lamda>.*?)", lamda_match ) if codeobj.rng_calls[poisson_name] > 0: for _ in range(repeat): for i in range(codeobj.rng_calls[poisson_name]): sub_repl = sub_repl_template.format( codeobj=codeobj, poisson_name=poisson_name, i=i ) codeobj.code.cu_file = re.sub( sub_pattern, sub_repl, codeobj.code.cu_file, count=1 ) elif poisson_name in poisson_with_lamda_zero: # Make sure the _poisson argument is a double literal. "0" fails since # it can be interpreted as both, null pointer or double and the _poisson # implementation is overloaded for `unsigned int *` and `double`. sub_repl= f"_poisson({float(lamda_match):.1f}, _vectorisation_idx)" codeobj.code.cu_file = re.sub( sub_pattern, sub_repl, codeobj.code.cu_file, count=0 # replace all ocurrences at once ) # If the codeobjec does not need curand states for poisson, check if it needs # them for binomial calls if not codeobj.needs_curand_states: match = re.search('_binomial\w*\(const int vectorisation_idx\)', codeobj.code.cu_file) if match is not None: codeobj.needs_curand_states = True cuda_standalone_device = CUDAStandaloneDevice() all_devices['cuda_standalone'] = cuda_standalone_device
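# --- Hedged usage sketch (not part of device.py above) -----------------------------
# Illustrates how the device registered under 'cuda_standalone' at the end of the
# file is typically driven from a Brian2 script. The model equation is a toy value,
# and `import brian2cuda` is assumed to be what triggers the registration above.
import brian2cuda                                    # registers 'cuda_standalone'
from brian2 import set_device, device, run, NeuronGroup, ms
set_device('cuda_standalone', build_on_run=False)    # manual build, see build() docstring
group = NeuronGroup(100, 'dv/dt = -v / (10*ms) : 1')
run(10*ms)                                           # queued via network_run(), nothing executes yet
run(20*ms)                                           # multiple run calls require build_on_run=False
device.build(directory='output', compile=True, run=True)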
brian-team/brian2cuda
brian2cuda/device.py
Python
gpl-2.0
98,245
[ "Brian" ]
73d7ee1327b330814a0a6742d6b59b10c49aba97ee7cea64c186a2e1a55a7761
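# --- Hedged illustration (toy code string, not from the repository) ----------------
# Shows the substitution that prepare_codeobj_code_for_rng() in the brian2cuda
# device.py excerpt above applies, so that each rand() call in a code object reads
# from its own slice of the random number buffer.
import re
pattern = r'(?<!#define )\b_rand\(_vectorisation_idx\)'           # same pattern as in the file
cu_code = 'v += _rand(_vectorisation_idx); w += _rand(_vectorisation_idx);'
for i in range(len(re.findall(pattern, cu_code))):
    # count=1 rewrites the first still-unmodified call; rewritten calls no longer match
    cu_code = re.sub(pattern, f'_rand(_vectorisation_idx + {i} * _N)', cu_code, count=1)
print(cu_code)
# v += _rand(_vectorisation_idx + 0 * _N); w += _rand(_vectorisation_idx + 1 * _N);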
""" Course catalog page """ import re from bok_choy.page_object import PageObject from common.test.acceptance.pages.lms import BASE_URL class CacheProgramsPage(PageObject): """ Visit this page to call the cache_programs management command. This page makes a GET request to a view which is only meant to be enabled in testing contexts where the LMS can only be reached over HTTP. Stub the discovery service before visiting this page. """ url = BASE_URL + '/catalog/management/cache_programs/' def is_browser_on_page(self): body = self.q(css='body').text[0] match = re.search(r'programs cached', body, flags=re.IGNORECASE) return True if match else False # lint-amnesty, pylint: disable=simplifiable-if-expression
edx/edx-platform
common/test/acceptance/pages/lms/catalog.py
Python
agpl-3.0
778
[ "VisIt" ]
37ac4e5e4579af16e6b9da175612b3df75a44c5764bdb9e976afe6bbaad54cfc
# ====================================================================== # Given # - a 3D unstructured grid (vtu file) representation of the MV segmentation # (e.g. by CGAL meshing operator, followed by vtk2vtu-Converter), which # only contains the connectivity information of 3D cells with four vertices # (i.e. tetrahedrons) and no other cells of dimension different from three, # and # - a 2D polydata surface representation of the MV segmentation, which # additionally contains matID-information on the MV leaflets and annulus # points, # this script produces a hf3-inp-file suitable for a HiFlow3-based # MVR-simulation. # # The coordinates of the vertices, the connectivity information of the # 3D cells and 2D boundary faces will be written to the hf3-inp-file. # The matIDs of the cells will be determined as follows: # - every 3D cell gets the material id 10. # - surface cells are subdivided into # - upside surfaces on anterior leaflet: matID 17, # - upside surfaces on posterior leaflet: matID 18, # - downside surfaces: matID 20. # NOTE: The result of this script is NOT DETERMINISTIC! This means that it requires # human assessment of the suitability of the results for the simulation algorithm. # NOTE: This version of the script uses CellNormals (as opposed to PointNormals). # NOTE: In order to avoid 'blurry' matID-distribution around the interface between # between the leaflets (near the commissure points), # - either use MITK-remeshing results (2 separate leaflets and 1 complete MV inc IDs), # - or possibly use some vtk filter "subdivision" to refine mesh "smoothing"... # How to run the script: # python vtuToHf3inpIncMVmatIDsProducer.py valve3d_volumeMesh.vtu valve2d_SurfaceIncVertexIDs.vtp outputname_hf3.inp # Author: Nicolai Schoch, EMCL; 2015-04-12. # ====================================================================== __author__ = 'schoch' import numpy as np from numpy import linalg as LA import sys import vtk #from .msmlvtk import * # NEEDED?! def vtu_To_hf3inp_inc_MV_matIDs_Producer(inputfilename, surfaceMesh, outputfilename): # ====================================================================== # Define matIDs -------------------------------------------------------- ID_UP = 21 # preliminary result, which gets overwritten by ID_ANT and ID_POST. ID_DOWN = 20 ID_ANT = 17 ID_POST = 18 # ====================================================================== # get system arguments ------------------------------------------------- valve3dFilename_ = inputfilename valve2dFilename_ = surfaceMesh outputFilename_ = outputfilename print " " print "===========================================================================================" print "=== Execute Python script to produce HiFlow3 inp file (incl. matIDs) for MVR-Simulation ===" print "===========================================================================================" print " " # ====================================================================== # read in files: ------------------------------------------------------- # read in 3d valve # NOTE: ensure that the precedent meshing algorithm (CGAL or similar) # produces consistent/good results w.r.t. the 'normal glyphs'. 
vtureader = vtk.vtkXMLUnstructuredGridReader() vtureader.SetFileName(valve3dFilename_) vtureader.Update() valve3d_ = vtureader.GetOutput() # get surface mesh of valve3d_ geometryFilter = vtk.vtkGeometryFilter() if vtk.vtkVersion().GetVTKMajorVersion() >= 6: geometryFilter.SetInputData(valve3d_) else: geometryFilter.SetInput(valve3d_) geometryFilter.Update() valve3dSurface_ = geometryFilter.GetOutput() # compute normals of surface mesh normalsSurface_ = vtk.vtkPolyDataNormals() if vtk.vtkVersion().GetVTKMajorVersion() >= 6: normalsSurface_.SetInputData(valve3dSurface_) else: normalsSurface_.SetInput(valve3dSurface_) normalsSurface_.SplittingOn() normalsSurface_.ConsistencyOn() # such that on a surface the normals are oriented either 'all' outward OR 'all' inward. normalsSurface_.AutoOrientNormalsOn() # such that normals point outward or inward. normalsSurface_.ComputePointNormalsOff() # adapt here. On/Off. normalsSurface_.ComputeCellNormalsOn() # adapt here. normalsSurface_.FlipNormalsOff() normalsSurface_.NonManifoldTraversalOn() normalsSurface_.Update() # get cell normals normalsSurfaceRetrieved_ = normalsSurface_.GetOutput().GetCellData().GetNormals() # adapt here. # read in 2d valve ----------------------------------------------------- vtpreader = vtk.vtkXMLPolyDataReader() vtpreader.SetFileName(valve2dFilename_) vtpreader.Update() valve2d_ = vtpreader.GetOutput() # compute normals of valve2d_ normalsValve2d_ = vtk.vtkPolyDataNormals() if vtk.vtkVersion().GetVTKMajorVersion() >= 6: normalsValve2d_.SetInputData(valve2d_) else: normalsValve2d_.SetInput(valve2d_) normalsValve2d_.SplittingOn() normalsValve2d_.ConsistencyOn() normalsValve2d_.ComputePointNormalsOff() # adapt here. normalsValve2d_.ComputeCellNormalsOn() normalsValve2d_.FlipNormalsOff() normalsValve2d_.NonManifoldTraversalOn() normalsValve2d_.Update() # get cell normals normalsValve2dRetrieved_ = normalsValve2d_.GetOutput().GetCellData().GetNormals() # adapt here. print "Reading 3D and 2D-annotated input files: DONE." # ====================================================================== # initialize cell locator for closest cell search ---------------------- # (using vtk methods, that find the closest point in a grid for an arbitrary point in R^3) cellLocator = vtk.vtkCellLocator() cellLocator.SetDataSet(valve2d_) cellLocator.BuildLocator() # ====================================================================== # allocate memory for cell_udlr_list_ (up-down-left-right) ------------- cell_udlr_list_ = [0 for i in range(valve3dSurface_.GetNumberOfCells())] # ====================================================================== # iterate over the cells of the surface and compare normals ------------ for i in range(valve3dSurface_.GetNumberOfCells()): # get cellId of closest point iD = valve3dSurface_.GetCell(i).GetPointId(0) # NOTE: only one (test)point (0) of respective cell testPoint = valve3dSurface_.GetPoint(iD) closestPoint = np.zeros(3) closestPointDist2 = vtk.mutable(0) cellId = vtk.mutable(0) subId = vtk.mutable(0) cellLocator.FindClosestPoint(testPoint, closestPoint, cellId, subId, closestPointDist2) normalSurf_ = np.zeros(3) normalsSurfaceRetrieved_.GetTuple(i, normalSurf_) normalV2d_ = np.zeros(3) normalsValve2dRetrieved_.GetTuple(cellId, normalV2d_) # set cell_udlr_list_ entry to (preliminary) "1", if cell is on upper side of leaflet if np.dot(normalSurf_, normalV2d_) > 0.0: cell_udlr_list_[i] = 1 # NOTE: "cell_udlr_list_[i] = 1" means "cell on upside". 
# ====================================================================== # iterate over cells on the upper side of the leaflet surface, and set ids for left/right ------------------ kDTree = vtk.vtkKdTreePointLocator() kDTree.SetDataSet(valve2d_) kDTree.BuildLocator() VertexIDs_ = valve2d_.GetPointData().GetArray('VertexIDs') # allocate memory for upCellList_ (indicating if cell is on left/right side) upCellList_ = [i for i in range(valve3dSurface_.GetNumberOfCells()) if cell_udlr_list_[i]] for i in upCellList_: iD = valve3dSurface_.GetCell(i).GetPointId(0) testPoint = valve3dSurface_.GetPoint(iD) result_ = vtk.vtkIdList() counter = 1 cond_ = True while cond_: kDTree.FindClosestNPoints(counter, testPoint, result_) for j in range(result_.GetNumberOfIds()): iD2 = result_.GetId(j) if int(VertexIDs_.GetTuple1(iD2)) == ID_ANT: cond_ = False cell_udlr_list_[i] = 2 # NOTE: "cell_udlr_list_[i] = 2" means "cell on ANT upside". if int(VertexIDs_.GetTuple1(iD2)) == ID_POST: cond_ = False cell_udlr_list_[i] = 3 # NOTE: "cell_udlr_list_[i] = 3" means "cell on POST upside". counter += 1 print "Computing hf3-inp MV matID information: DONE." # ====================================================================== # write results to inp file -------------------------------------------- f = open(outputFilename_, 'w') # write first line s = str(valve3d_.GetNumberOfPoints()) + ' ' + str(valve3dSurface_.GetNumberOfCells()+valve3d_.GetNumberOfCells()) + ' 0 0 0\n' f.write(s) # write point coordinates for i in range(valve3d_.GetNumberOfPoints()): pt = valve3d_.GetPoint(i) s = str(i) + ' ' + str(pt[0]) + ' ' + str(pt[1]) + ' ' + str(pt[2]) + '\n' f.write(s) # write connectivity information of triangles # integer, material id, vertex point ids for i in range(valve3dSurface_.GetNumberOfCells()): cell = valve3dSurface_.GetCell(i) iDs = cell.GetPointIds() if cell_udlr_list_[i] == 2: # NOTE: "cell_udlr_list_[i] = 2" means "cell on ANT upside". matId = ID_ANT elif cell_udlr_list_[i] == 3: # NOTE: "cell_udlr_list_[i] = 3" means "cell on POST upside". matId = ID_POST else: # NOTE: "cell_udlr_list_[i] = 0" means "cell on downside". matId = ID_DOWN s = str(0) + ' ' + str(matId) + ' tri ' + str(iDs.GetId(0)) + ' ' + str(iDs.GetId(1)) + ' ' + str(iDs.GetId(2)) + '\n' f.write(s) # write connectivity information of tetrahedrons # integer, material id, vertex point ids for i in range(valve3d_.GetNumberOfCells()): cell = valve3d_.GetCell(i) iDs = cell.GetPointIds() matId = 10 s = str(0) + ' ' + str(matId) + ' tet ' + str(iDs.GetId(0)) + ' ' + str(iDs.GetId(1)) + ' ' + str(iDs.GetId(2)) + ' ' + str(iDs.GetId(3)) + '\n' f.write(s) # close stream f.close() # ====================================================================== print "Writing HiFlow3 inp output file (incl. MV matIDs): DONE." print "========================================================" print " "
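# --- Hedged sketch (an assumption: the excerpt above defines the converter function
# but shows no __main__ entry point) -- wiring the command line from the header
# comment to the function, so the documented invocation works:
#   python vtuToHf3inpIncMVmatIDsProducer.py valve3d_volumeMesh.vtu valve2d_SurfaceIncVertexIDs.vtp outputname_hf3.inp
if __name__ == '__main__':
    if len(sys.argv) != 4:
        sys.exit('usage: python vtuToHf3inpIncMVmatIDsProducer.py <valve3d.vtu> <valve2d_surface.vtp> <output_hf3.inp>')
    vtu_To_hf3inp_inc_MV_matIDs_Producer(sys.argv[1], sys.argv[2], sys.argv[3])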
CognitionGuidedSurgery/msml
src/msml/ext/vtuToHf3inpIncMVmatIDsProducer.py
Python
gpl-3.0
10,077
[ "VTK" ]
2763609f740cea233d790624e500ec4c2b2e5e3f73fec75f37662c19cceba905
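The matID classification in the script above rests on two VTK utilities: vtkCellLocator for nearest-cell queries against the annotated 2D valve, and per-cell normals compared with a dot product. The sketch below isolates that pattern; the function name and its three arguments are illustrative placeholders rather than names from the original script, and it assumes both meshes already carry cell normals.

# Minimal sketch of the closest-cell / normal-comparison step used above.
# classify_upside() and its arguments are illustrative, not from the script.
import numpy as np
import vtk

def classify_upside(surface, reference, reference_normals):
    """Return 1 per surface cell whose normal agrees with the closest
    reference cell's normal ('upside'), 0 otherwise."""
    locator = vtk.vtkCellLocator()
    locator.SetDataSet(reference)
    locator.BuildLocator()

    surface_normals = surface.GetCellData().GetNormals()
    flags = [0] * surface.GetNumberOfCells()
    for i in range(surface.GetNumberOfCells()):
        # As in the script, use the cell's first point as the query point.
        test_point = surface.GetPoint(surface.GetCell(i).GetPointId(0))
        closest_point = np.zeros(3)
        cell_id = vtk.mutable(0)
        sub_id = vtk.mutable(0)
        dist2 = vtk.mutable(0.0)
        locator.FindClosestPoint(test_point, closest_point, cell_id, sub_id, dist2)

        n_surf = np.zeros(3)
        surface_normals.GetTuple(i, n_surf)
        n_ref = np.zeros(3)
        reference_normals.GetTuple(cell_id, n_ref)
        if np.dot(n_surf, n_ref) > 0.0:
            flags[i] = 1
    return flags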
# -*- coding: utf-8 -*- # Generated by Django 1.11.23 on 2019-11-25 16:16 from django.conf import settings import django.contrib.postgres.fields from django.db import migrations, models import django.utils.timezone class Migration(migrations.Migration): replaces = [ ('seqr', '0001_initial'), ('seqr', '0002_auto_20170309_0751'), ('seqr', '0003_auto_20170313_0315'), ('seqr', '0004_auto_20170313_0853'), ('seqr', '0005_auto_20170319_0332'), ('seqr', '0006_auto_20170430_0044'), ('seqr', '0007_auto_20170509_0046'), ('seqr', '0008_sample_deprecated_base_project'), ('seqr', '0009_uploadedfileforfamily_uploadedfileforindividual'), ('seqr', '0010_auto_20170516_0735'), ('seqr', '0011_auto_20170517_2222'), ('seqr', '0012_auto_20170518_2015'), ('seqr', '0013_auto_20170526_0157'), ('seqr', '0014_auto_20170527_1953'), ('seqr', '0015_auto_20170606_1910'), ('seqr', '0016_auto_20170621_1800'), ('seqr', '0017_auto_20170623_1300'), ('seqr', '0018_auto_20170626_1401'), ('seqr', '0019_auto_20170630_1754'), ('seqr', '0020_auto_20170630_1759'), ('seqr', '0021_dataset_dataset_id'), ('seqr', '0022_auto_20170704_1948'), ('seqr', '0023_auto_20170909_1828'), ('seqr', '0024_auto_20171101_2354'), ('seqr', '0025_auto_20171106_0757'), ('seqr', '0026_auto_20171112_1649'), ('seqr', '0027_auto_20171115_1532'), ('seqr', '0028_auto_20171123_0200'), ('seqr', '0029_auto_20171130_0700'), ('seqr', '0030_auto_20171213_1652'), ('seqr', '0031_auto_20171214_1104'), ('seqr', '0032_auto_20171218_0423'), ('seqr', '0033_auto_20180111_1006'), ('seqr', '0034_auto_20180411_1951'), ('seqr', '0035_variantnote_submit_to_clinvar'), ('seqr', '0036_auto_20180415_2250'), ('seqr', '0037_remove_individual_case_review_status_accepted_for'), ('seqr', '0037_project_disable_staff_access'), ('seqr', '0038_merge_20180530_2034'), ('seqr', '0037_auto_20180515_1440'), ('seqr', '0038_merge_20180525_1800'), ('seqr', '0039_merge_20180531_1604'), ('seqr', '0040_auto_20180612_1513'), ('seqr', '0040_auto_20180603_1309'), ('seqr', '0041_merge_20180621_1247'), ('seqr', '0041_genenote'), ('seqr', '0042_merge_20180625_1529'), ('seqr', '0043_auto_20180719_1212'), ('seqr', '0044_familyanalysedby'), ('seqr', '0044_auto_20180723_1808'), ('seqr', '0045_merge_20180726_1246'), ('seqr', '0045_auto_20180801_1933'), ('seqr', '0046_merge_20180803_1534'), ('seqr', '0046_auto_20180803_1708'), ('seqr', '0047_merge_20180809_1746'), ('seqr', '0048_auto_20181106_1639'), ('seqr', '0049_auto_20190114_2030'), ('seqr', '0050_family_pubmed_ids'), ('seqr', '0051_auto_20190320_2109'), ('seqr', '0052_remove_variantsearchresults_es_index'), ('seqr', '0053_auto_20190405_1525'), ('seqr', '0054_project_has_new_search'), ('seqr', '0055_remove_sample_dataset_name'), ('seqr', '0056_auto_20190513_1621'), ('seqr', '0056_auto_20190424_2059'), ('seqr', '0057_merge_20190513_2009'), ('seqr', '0058_matchmakercontactnotes'), ('seqr', '0059_auto_20190705_1450'), ('seqr', '0060_matchmakerresult_match_removed'), ('seqr', '0061_family_assigned_analyst'), ('seqr', '0061_auto_20190715_1500'), ('seqr', '0062_merge_20190723_1539'), ('seqr', '0063_auto_20190806_1801'), ('seqr', '0064_auto_20190823_2112'), ('seqr', '0063_auto_20190723_1825'), ('seqr', '0065_merge_20190827_2031'), ('seqr', '0066_auto_20191007_1946'), ('seqr', '0067_remove_project_custom_reference_populations') ] initial = True dependencies = [ ('auth', '0008_alter_user_username_max_length'), migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Project', fields=[ ('id', 
models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('guid', models.CharField(db_index=True, max_length=30, unique=True)), ('created_date', models.DateTimeField(db_index=True, default=django.utils.timezone.now)), ('last_modified_date', models.DateTimeField(blank=True, db_index=True, null=True)), ('name', models.TextField()), ('description', models.TextField(blank=True, null=True)), ('is_phenotips_enabled', models.BooleanField(default=False)), ('phenotips_user_id', models.CharField(blank=True, db_index=True, max_length=100, null=True)), ('is_mme_enabled', models.BooleanField(default=True)), ('mme_primary_data_owner', models.TextField(blank=True, default='Samantha Baxter', null=True)), ('last_accessed_date', models.DateTimeField(blank=True, null=True, db_index=True)), ('deprecated_project_id', models.TextField(blank=True, db_index=True, default='')), ('can_edit_group', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='+', to='auth.Group')), ('can_view_group', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='+', to='auth.Group')), ('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)), ('owners_group', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='+', to='auth.Group')), ('mme_contact_url', models.TextField( blank=True, default='mailto:matchmaker@broadinstitute.org', null=True)), ('mme_contact_institution', models.TextField( blank=True, default='Broad Center for Mendelian Genomics', null=True)), ('disease_area', models.CharField(blank=True, choices=[ ('blood', 'Blood'), ('cardio', 'Cardio'), ('kidney', 'Kidney'), ('muscle', 'Muscle'), ('neurodev', 'Neurodev'), ('orphan_disease', 'Orphan Disease'), ('retinal', 'Retinal') ], max_length=20, null=True)), ('is_functional_data_enabled', models.BooleanField(default=False)), ('disable_staff_access', models.BooleanField(default=False)), ('genome_version', models.CharField( choices=[('37', 'GRCh37'), ('38', 'GRCh38')], default='37', max_length=5)), ('has_new_search', models.BooleanField(default=False)), ], options={ 'permissions': (('can_view', 'can_view'), ('can_edit', 'can_edit'), ('is_owner', 'is_owner')), }, ), migrations.CreateModel( name='Family', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('guid', models.CharField(db_index=True, max_length=30, unique=True)), ('created_date', models.DateTimeField(db_index=True, default=django.utils.timezone.now)), ('last_modified_date', models.DateTimeField(blank=True, db_index=True, null=True)), ('family_id', models.CharField(db_index=True, max_length=100)), ('display_name', models.CharField(blank=True, db_index=True, max_length=100, null=True)), ('description', models.TextField(blank=True, null=True)), ('pedigree_image', models.ImageField(blank=True, null=True, upload_to='pedigree_images')), ('analysis_notes', models.TextField(blank=True, null=True)), ('analysis_summary', models.TextField(blank=True, null=True)), ('causal_inheritance_mode', models.CharField(choices=[ ('r', 'recessive'), ('u', 'unknown'), ('d', 'dominant'), ('x', 'x-linked recessive'), ('n', 'de novo')], default='u', max_length=20)), ('analysis_status', models.CharField(choices=[ ('S', 'S'), ('S_kgfp', 'S'), ('S_kgdp', 'S'), ('S_ng', 'S'), ('Sc_kgfp', 'S'), ('Sc_kgdp', 'S'), ('Sc_ng', 'S'), ('Rcpc', 'R'), ('Rncc', 'R'), ('C', 'C'), ('I', 'A'), ('Q', 'W') ], 
default='Q', max_length=10)), ('internal_analysis_status', models.CharField(blank=True, choices=[ ('S','S'), ('S_kgfp','S'), ('S_kgdp','S'), ('S_ng','S'), ('Sc_kgfp','S'), ('Sc_kgdp', 'S'), ('Sc_ng', 'S'), ('Rcpc', 'R'), ('Rncc', 'R'), ('C', 'C'), ('I', 'A'), ('Q', 'W') ], max_length=10, null=True)), ('internal_case_review_notes', models.TextField(blank=True, null=True)), ('internal_case_review_summary', models.TextField(blank=True, null=True)), ('created_by', models.ForeignKey( blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)), ('project', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='seqr.Project')), ('coded_phenotype', models.TextField(blank=True, null=True)), ('post_discovery_omim_number', models.TextField(blank=True, null=True)), ('pubmed_ids', django.contrib.postgres.fields.ArrayField( base_field=models.TextField(), default=[], size=None)), ('assigned_analyst', models.ForeignKey( null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assigned_families', to=settings.AUTH_USER_MODEL)), ('success_story', models.TextField(blank=True, null=True)), ('success_story_types', django.contrib.postgres.fields.ArrayField( base_field=models.CharField(blank=True, choices=[ ('N', 'Novel Discovery'), ('A', 'Altered Clinical Outcome'), ('C', 'Collaboration'), ('T', 'Technical Win'), ('D', 'Data Sharing'), ('O', 'Other') ], max_length=1, null=True), default=[], size=None)), ], ), migrations.AlterUniqueTogether( name='family', unique_together=set([('project', 'family_id')]), ), migrations.CreateModel( name='Individual', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('guid', models.CharField(db_index=True, max_length=30, unique=True)), ('created_date', models.DateTimeField(db_index=True, default=django.utils.timezone.now)), ('last_modified_date', models.DateTimeField(blank=True, db_index=True, null=True)), ('individual_id', models.TextField(db_index=True)), ('father', models.ForeignKey( blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='paternal_children', to='seqr.Individual')), ('mother', models.ForeignKey( blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='maternal_children', to='seqr.Individual')), ('sex', models.CharField(choices=[ ('M', 'Male'), ('F', 'Female'), ('U', 'Unknown')], default='U', max_length=1)), ('affected', models.CharField(choices=[ ('A', 'Affected'), ('N', 'Unaffected'), ('U', 'Unknown')], default='U', max_length=1)), ('display_name', models.TextField(blank=True, default='')), ('case_review_status', models.CharField(choices=[ ('I', 'In Review'), ('U', 'Uncertain'), ('A', 'Accepted'), ('R', 'Not Accepted'), ('Q', 'More Info Needed'), ('P', 'Pending Results and Records'), ('N', 'NMI Review'), ('W', 'Waitlist') ], default='I', max_length=2)), ('case_review_status_last_modified_date', models.DateTimeField(blank=True, db_index=True, null=True)), ('case_review_discussion', models.TextField(blank=True, null=True)), ('phenotips_patient_id', models.CharField(blank=True, db_index=True, max_length=30, null=True)), ('phenotips_eid', models.CharField(blank=True, max_length=165, null=True)), ('phenotips_data', models.TextField(blank=True, null=True)), ('mme_id', models.CharField(blank=True, max_length=50, null=True)), ('mme_submitted_data', django.contrib.postgres.fields.jsonb.JSONField(null=True)), ('case_review_status_last_modified_by', models.ForeignKey( blank=True, 
null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)), ('created_by', models.ForeignKey( blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)), ('family', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='seqr.Family')), ('notes', models.TextField(blank=True, null=True)), ('mme_deleted_by', models.ForeignKey( null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)), ('mme_deleted_date', models.DateTimeField(null=True)), ('mme_submitted_date', models.DateTimeField(null=True)), ('filter_flags', django.contrib.postgres.fields.jsonb.JSONField(null=True)), ('pop_platform_filters', django.contrib.postgres.fields.jsonb.JSONField(null=True)), ('population', models.CharField(max_length=5, null=True)), ], ), migrations.AlterUniqueTogether( name='individual', unique_together=set([('family', 'individual_id')]), ), migrations.CreateModel( name='Sample', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('guid', models.CharField(db_index=True, max_length=30, unique=True)), ('created_date', models.DateTimeField(db_index=True, default=django.utils.timezone.now)), ('last_modified_date', models.DateTimeField(blank=True, db_index=True, null=True)), ('sample_id', models.TextField(db_index=True)), ('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)), ('loaded_date', models.DateTimeField(blank=True, null=True)), ('individual', models.ForeignKey( null=True, on_delete=django.db.models.deletion.PROTECT, to='seqr.Individual')), ('sample_type', models.CharField(blank=True, choices=[ ('WES', 'Exome'), ('WGS', 'Whole Genome'), ('RNA', 'RNA'), ('ARRAY', 'ARRAY') ], max_length=20, null=True)), ('elasticsearch_index', models.TextField(blank=True, db_index=True, null=True)), ('is_active', models.BooleanField(default=False)), ('dataset_file_path', models.TextField(blank=True, db_index=True, null=True)), ('dataset_type', models.CharField(blank=True, choices=[ ('ALIGN', 'Alignment'), ('VARIANTS', 'Variant Calls'), ('SV', 'SV Calls'), ('BREAK', 'Breakpoints'), ('SPLICE', 'Splice Junction Calls'), ('ASE', 'Allele Specific Expression') ], max_length=20, null=True), ), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='ProjectCategory', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('guid', models.CharField(db_index=True, max_length=30, unique=True)), ('created_date', models.DateTimeField(db_index=True, default=django.utils.timezone.now)), ('last_modified_date', models.DateTimeField(blank=True, db_index=True, null=True)), ('name', models.TextField(db_index=True)), ('created_by', models.ForeignKey( blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)), ('projects', models.ManyToManyField(to='seqr.Project')), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='ProjectLastAccessedDate', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('last_accessed_date', models.DateTimeField(auto_now=True, db_index=True)), ('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='seqr.Project')), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], 
), migrations.CreateModel( name='SavedVariant', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('guid', models.CharField(db_index=True, max_length=30, unique=True)), ('created_date', models.DateTimeField(db_index=True, default=django.utils.timezone.now)), ('last_modified_date', models.DateTimeField(blank=True, db_index=True, null=True)), ('xpos_start', models.BigIntegerField()), ('xpos_end', models.BigIntegerField(null=True)), ('ref', models.TextField()), ('alt', models.TextField()), ('saved_variant_json', django.contrib.postgres.fields.jsonb.JSONField(default=dict)), ('created_by', models.ForeignKey( blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)), ('family', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='seqr.Family')), ('selected_main_transcript_id', models.CharField(max_length=20, null=True)) ], ), migrations.AlterUniqueTogether( name='savedvariant', unique_together=set([('xpos_start', 'xpos_end', 'ref', 'alt', 'family')]), ), migrations.CreateModel( name='VariantNote', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('guid', models.CharField(db_index=True, max_length=30, unique=True)), ('created_date', models.DateTimeField(db_index=True, default=django.utils.timezone.now)), ('last_modified_date', models.DateTimeField(blank=True, db_index=True, null=True)), ('note', models.TextField(blank=True, null=True)), ('search_parameters', models.TextField(blank=True, null=True)), ('created_by', models.ForeignKey( blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)), ('submit_to_clinvar', models.BooleanField(default=False)), ('search_hash', models.CharField(max_length=50, null=True)), ('saved_variant', models.ForeignKey( null=True, on_delete=django.db.models.deletion.CASCADE, to='seqr.SavedVariant')) ], options={ 'abstract': False, }, ), migrations.CreateModel( name='VariantTagType', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('guid', models.CharField(db_index=True, max_length=30, unique=True)), ('created_date', models.DateTimeField(db_index=True, default=django.utils.timezone.now)), ('last_modified_date', models.DateTimeField(blank=True, db_index=True, null=True)), ('name', models.TextField()), ('description', models.TextField(blank=True, null=True)), ('color', models.CharField(default='#1f78b4', max_length=20)), ('created_by', models.ForeignKey( blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)), ('project', models.ForeignKey( null=True, on_delete=django.db.models.deletion.CASCADE, to='seqr.Project')), ('category', models.TextField(blank=True, null=True)), ('order', models.FloatField(null=True)), ('is_built_in', models.BooleanField(default=False)) ], ), migrations.AlterUniqueTogether( name='varianttagtype', unique_together=set([('project', 'name', 'color')]), ), migrations.CreateModel( name='VariantTag', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('guid', models.CharField(db_index=True, max_length=30, unique=True)), ('created_date', models.DateTimeField(db_index=True, default=django.utils.timezone.now)), ('last_modified_date', models.DateTimeField(blank=True, db_index=True, null=True)), ('search_parameters', 
models.TextField(blank=True, null=True)), ('created_by', models.ForeignKey( blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)), ('variant_tag_type', models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, to='seqr.VariantTagType')), ('saved_variant', models.ForeignKey( null=True, on_delete=django.db.models.deletion.CASCADE, to='seqr.SavedVariant')), ('search_hash', models.CharField(max_length=50, null=True)) ], ), migrations.AlterUniqueTogether( name='varianttag', unique_together=set([('variant_tag_type', 'saved_variant')]), ), migrations.CreateModel( name='VariantFunctionalData', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('guid', models.CharField(db_index=True, max_length=30, unique=True)), ('created_date', models.DateTimeField(db_index=True, default=django.utils.timezone.now)), ('last_modified_date', models.DateTimeField(blank=True, db_index=True, null=True)), ('functional_data_tag', models.TextField(choices=[ ('Functional Data', ( ('Biochemical Function', '{"description": "Gene product performs a biochemical function shared with other known genes in the disease of interest, or consistent with the phenotype.", "color": "#311B92"}'), ('Protein Interaction', '{"description": "Gene product interacts with proteins previously implicated (genetically or biochemically) in the disease of interest.", "color": "#4A148C"}'), ('Expression', '{"description": "Gene is expressed in tissues relevant to the disease of interest and/or is altered in expression in patients who have the disease.", "color": "#7C4DFF"}'), ('Patient Cells', '{"description": "Gene and/or gene product function is demonstrably altered in patients carrying candidate mutations.", "color": "#B388FF"}'), ('Non-patient cells', '{"description": "Gene and/or gene product function is demonstrably altered in human cell culture models carrying candidate mutations.", "color": "#9575CD"}'), ('Animal Model', '{"description": "Non-human animal models with a similarly disrupted copy of the affected gene show a phenotype consistent with human disease state.", "color": "#AA00FF"}'), ('Non-human cell culture model', '{"description": "Non-human cell-culture models with a similarly disrupted copy of the affected gene show a phenotype consistent with human disease state.", "color": "#BA68C8"}'), ('Rescue', '{"description": "The cellular phenotype in patient-derived cells or engineered equivalents can be rescued by addition of the wild-type gene product.", "color": "#663399"}'))), ('Functional Scores', ( ('Genome-wide Linkage', '{"metadata_title": "LOD Score", "description": "Max LOD score used in analysis to restrict where you looked for causal variants; provide best score available, whether it be a cumulative LOD score across multiple families or just the best family\'s LOD score.", "color": "#880E4F"}'), ('Bonferroni corrected p-value', '{"metadata_title": "P-value", "description": "Bonferroni-corrected p-value for gene if association testing/burden testing/etc was used to identify the gene.", "color": "#E91E63"}'), ('Kindreds w/ Overlapping SV & Similar Phenotype', '{"metadata_title": "#", "description": "Number of kindreds (1+) previously reported/in databases as having structural variant overlapping the gene and a similar phenotype.", "color": "#FF5252"}'))), ('Additional Kindreds (Literature, MME)', ( ('Additional Unrelated Kindreds w/ Causal Variants in Gene', '{"metadata_title": "# additional families", 
"description": "Number of additional kindreds with causal variants in this gene (Any other kindreds from collaborators, MME, literature etc). Do not count your family in this total.", "color": "#D84315"}'), ))])), ('metadata', models.TextField(null=True)), ('search_parameters', models.TextField(blank=True, null=True)), ('created_by', models.ForeignKey( blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)), ('saved_variant', models.ForeignKey( null=True, on_delete=django.db.models.deletion.CASCADE, to='seqr.SavedVariant')), ('search_hash', models.CharField(max_length=50, null=True)), ], ), migrations.AlterUniqueTogether( name='variantfunctionaldata', unique_together=set([('functional_data_tag', 'saved_variant')]), ), migrations.CreateModel( name='UploadedFileForFamily', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.TextField()), ('uploaded_file', models.FileField(max_length=200, upload_to='uploaded_family_files')), ('uploaded_date', models.DateTimeField(blank=True, null=True)), ('family', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='seqr.Family')), ('uploaded_by', models.ForeignKey( null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='UploadedFileForIndividual', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.TextField()), ('uploaded_file', models.FileField(max_length=200, upload_to='uploaded_individual_files')), ('uploaded_date', models.DateTimeField(blank=True, null=True)), ('individual', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='seqr.Individual')), ('uploaded_by', models.ForeignKey( null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='LocusList', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('guid', models.CharField(db_index=True, max_length=30, unique=True)), ('created_date', models.DateTimeField(db_index=True, default=django.utils.timezone.now)), ('last_modified_date', models.DateTimeField(blank=True, db_index=True, null=True)), ('name', models.TextField(db_index=True)), ('description', models.TextField(blank=True, null=True)), ('is_public', models.BooleanField(default=False)), ('created_by', models.ForeignKey( blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL,related_name='+', to=settings.AUTH_USER_MODEL)), ], options={ 'permissions': (('can_view', 'can_view'), ('can_edit', 'can_edit'), ('is_owner', 'is_owner')), }, ), migrations.AlterUniqueTogether( name='locuslist', unique_together=set([('name', 'description', 'is_public', 'created_by')]), ), migrations.CreateModel( name='LocusListGene', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('guid', models.CharField(db_index=True, max_length=30, unique=True)), ('created_date', models.DateTimeField(db_index=True, default=django.utils.timezone.now)), ('last_modified_date', models.DateTimeField(blank=True, db_index=True, null=True)), ('gene_id', models.TextField(db_index=True)), ('description', models.TextField(blank=True, null=True)), ('created_by', models.ForeignKey( blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)), ('locus_list', 
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='seqr.LocusList')), ], ), migrations.AlterUniqueTogether( name='locuslistgene', unique_together=set([('locus_list', 'gene_id')]), ), migrations.CreateModel( name='LocusListInterval', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('guid', models.CharField(db_index=True, max_length=30, unique=True)), ('created_date', models.DateTimeField(db_index=True, default=django.utils.timezone.now)), ('last_modified_date', models.DateTimeField(blank=True, db_index=True, null=True)), ('genome_version', models.CharField( choices=[('37', 'GRCh37'), ('38', 'GRCh38')], default='37', max_length=5)), ('chrom', models.CharField(max_length=2)), ('start', models.IntegerField()), ('end', models.IntegerField()), ('description', models.TextField(blank=True, null=True)), ('created_by', models.ForeignKey( blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)), ('locus_list', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='seqr.LocusList')) ], ), migrations.AlterUniqueTogether( name='locuslistinterval', unique_together=set([('locus_list', 'genome_version', 'chrom', 'start', 'end')]), ), migrations.CreateModel( name='GeneNote', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('guid', models.CharField(db_index=True, max_length=30, unique=True)), ('created_date', models.DateTimeField(db_index=True, default=django.utils.timezone.now)), ('last_modified_date', models.DateTimeField(blank=True, db_index=True, null=True)), ('note', models.TextField(blank=True, default='')), ('gene_id', models.CharField(max_length=20)), ('created_by', models.ForeignKey( blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='FamilyAnalysedBy', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('guid', models.CharField(db_index=True, max_length=30, unique=True)), ('created_date', models.DateTimeField(db_index=True, default=django.utils.timezone.now)), ('last_modified_date', models.DateTimeField(blank=True, db_index=True, null=True)), ('created_by', models.ForeignKey( blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)), ('family', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='seqr.Family')), ], ), migrations.CreateModel( name='AnalysisGroup', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('guid', models.CharField(db_index=True, max_length=30, unique=True)), ('created_date', models.DateTimeField(db_index=True, default=django.utils.timezone.now)), ('last_modified_date', models.DateTimeField(blank=True, db_index=True, null=True)), ('name', models.TextField()), ('description', models.TextField(blank=True, null=True)), ('created_by', models.ForeignKey( blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)), ('families', models.ManyToManyField(to='seqr.Family')), ('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='seqr.Project')), ], ), migrations.AlterUniqueTogether( name='analysisgroup', unique_together=set([('project', 'name')]), ), migrations.CreateModel( name='VariantSearch', fields=[ ('id', 
models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('guid', models.CharField(db_index=True, max_length=30, unique=True)), ('created_date', models.DateTimeField(db_index=True, default=django.utils.timezone.now)), ('last_modified_date', models.DateTimeField(blank=True, db_index=True, null=True)), ('name', models.CharField(max_length=200, null=True)), ('search', django.contrib.postgres.fields.jsonb.JSONField()), ('created_by', models.ForeignKey( blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)), ], ), migrations.AlterUniqueTogether( name='variantsearch', unique_together=set([('created_by', 'name')]), ), migrations.CreateModel( name='VariantSearchResults', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('guid', models.CharField(db_index=True, max_length=30, unique=True)), ('created_date', models.DateTimeField(db_index=True, default=django.utils.timezone.now)), ('last_modified_date', models.DateTimeField(blank=True, db_index=True, null=True)), ('search_hash', models.CharField(db_index=True, max_length=50, unique=True)), ('created_by', models.ForeignKey( blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)), ('families', models.ManyToManyField(to='seqr.Family')), ('variant_search', models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, to='seqr.VariantSearch')), ], ), migrations.CreateModel( name='MatchmakerResult', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('guid', models.CharField(db_index=True, max_length=30, unique=True)), ('created_date', models.DateTimeField(db_index=True, default=django.utils.timezone.now)), ('last_modified_date', models.DateTimeField(blank=True, db_index=True, null=True)), ('result_data', django.contrib.postgres.fields.jsonb.JSONField()), ('we_contacted', models.BooleanField(default=False)), ('host_contacted', models.BooleanField(default=False)), ('deemed_irrelevant', models.BooleanField(default=False)), ('flag_for_analysis', models.BooleanField(default=False)), ('comments', models.TextField(blank=True, null=True)), ('created_by', models.ForeignKey( blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)), ('individual', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='seqr.Individual')), ('last_modified_by', models.ForeignKey( null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)), ('match_removed', models.BooleanField(default=False)), ], ), migrations.CreateModel( name='MatchmakerContactNotes', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('guid', models.CharField(db_index=True, max_length=30, unique=True)), ('created_date', models.DateTimeField(db_index=True, default=django.utils.timezone.now)), ('last_modified_date', models.DateTimeField(blank=True, db_index=True, null=True)), ('institution', models.CharField(db_index=True, max_length=200, unique=True)), ('comments', models.TextField(blank=True)), ('created_by', models.ForeignKey( blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)), ], ), ]
macarthur-lab/seqr
seqr/migrations/0001_squashed_0067_remove_project_custom_reference_populations.py
Python
agpl-3.0
40,222
[ "ASE" ]
b4562f3c4b646d2115dc415b4138e140c01bd160b81da5c6b414dfdb00d1f664
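The file above is a squashed Django migration: the replaces list names the 67 historical migrations it supersedes, and the schema is rebuilt from scratch with CreateModel operations. A stripped-down sketch of the same pattern, using a placeholder app and model rather than the seqr schema, looks like this:

# Minimal sketch of a squashed Django migration; the 'example' app and 'Item'
# model are placeholders, not part of the seqr schema above.
from django.db import migrations, models
import django.utils.timezone


class Migration(migrations.Migration):

    # Historical migrations this squashed file replaces once it is applied.
    replaces = [
        ('example', '0001_initial'),
        ('example', '0002_item_created_date'),
    ]

    initial = True

    dependencies = []

    operations = [
        migrations.CreateModel(
            name='Item',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True,
                                        serialize=False, verbose_name='ID')),
                ('guid', models.CharField(db_index=True, max_length=30, unique=True)),
                ('created_date', models.DateTimeField(db_index=True,
                                                      default=django.utils.timezone.now)),
                ('name', models.TextField()),
            ],
        ),
    ]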
#!/usr/bin/env python # Copyright 2015 The Kubernetes Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function import argparse import glob import json import mmap import os import re import sys parser = argparse.ArgumentParser() parser.add_argument("filenames", help="list of files to check, all files if unspecified", nargs='*') args = parser.parse_args() rootdir = os.path.dirname(__file__) + "/../../" rootdir = os.path.abspath(rootdir) def get_refs(): refs = {} for path in glob.glob(os.path.join(rootdir, "verify/boilerplate/boilerplate.*.txt")): extension = os.path.basename(path).split(".")[1] ref_file = open(path, 'r') ref = ref_file.read().splitlines() ref_file.close() refs[extension] = ref return refs def file_passes(filename, refs, regexs): try: f = open(filename, 'r') except: return False data = f.read() f.close() extension = file_extension(filename) ref = refs[extension] # remove build tags from the top of Go files if extension == "go": p = regexs["go_build_constraints"] (data, found) = p.subn("", data, 1) # remove shebang from the top of shell files if extension == "sh": p = regexs["shebang"] (data, found) = p.subn("", data, 1) data = data.splitlines() # if our test file is smaller than the reference it surely fails! if len(ref) > len(data): return False # trim our file to the same number of lines as the reference file data = data[:len(ref)] p = regexs["year"] for d in data: if p.search(d): return False # Replace all occurrences of the regex "2016|2015|2014" with "YEAR" p = regexs["date"] for i, d in enumerate(data): (data[i], found) = p.subn('YEAR', d) if found != 0: break # if we don't match the reference at this point, fail if ref != data: return False return True def file_extension(filename): return os.path.splitext(filename)[1].split(".")[-1].lower() skipped_dirs = ['Godeps', 'third_party', '_output', '.git', 'vendor'] def normalize_files(files): newfiles = [] for pathname in files: if any(x in pathname for x in skipped_dirs): continue newfiles.append(pathname) for i, pathname in enumerate(newfiles): if not os.path.isabs(pathname): newfiles[i] = os.path.join(rootdir, pathname) return newfiles def get_files(extensions): files = [] if len(args.filenames) > 0: files = args.filenames else: for root, dirs, walkfiles in os.walk(rootdir): # don't visit certain dirs. This is just a performance improvement # as we would prune these later in normalize_files(). 
But doing it # cuts down the amount of filesystem walking we do and cuts down # the size of the file list for d in skipped_dirs: if d in dirs: dirs.remove(d) for name in walkfiles: pathname = os.path.join(root, name) files.append(pathname) files = normalize_files(files) outfiles = [] for pathname in files: extension = file_extension(pathname) if extension in extensions: outfiles.append(pathname) return outfiles def get_regexs(): regexs = {} # Search for "YEAR" which exists in the boilerplate, but shouldn't in the real thing regexs["year"] = re.compile( 'YEAR' ) # dates can be 2014 or 2015, company holder names can be anything regexs["date"] = re.compile( '(2014|2015|2016|2017)' ) # strip // +build \n\n build constraints regexs["go_build_constraints"] = re.compile(r"^(// \+build.*\n)+\n", re.MULTILINE) # strip #!.* from shell scripts regexs["shebang"] = re.compile(r"^(#!.*\n)\n*", re.MULTILINE) return regexs def main(): regexs = get_regexs() refs = get_refs() filenames = get_files(refs.keys()) for filename in filenames: if not file_passes(filename, refs, regexs): print(filename, file=sys.stdout) if __name__ == "__main__": sys.exit(main())
gmarek/perf-tests
verify/boilerplate/boilerplate.py
Python
apache-2.0
4,740
[ "VisIt" ]
adc08173258e6be06df613a53178055d5331357e2cd039456d9a8a3c517a6713
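boilerplate.py above verifies license headers by trimming each file to the length of the reference boilerplate and substituting the concrete year with the placeholder YEAR before comparing. A reduced sketch of that comparison for a single in-memory string follows; the reference and sample texts are illustrative, not the real Kubernetes boilerplate files:

# Minimal sketch of the year-normalised header comparison from boilerplate.py.
# The reference and sample strings are illustrative. Note the real script also
# rejects files that literally contain the token "YEAR".
import re

DATE_RE = re.compile(r'(2014|2015|2016|2017)')

def header_matches(candidate_text, reference_lines):
    """Return True if the first len(reference_lines) lines of candidate_text
    equal the reference once a concrete year is replaced by 'YEAR'."""
    lines = candidate_text.splitlines()
    if len(lines) < len(reference_lines):
        return False          # shorter than the reference: cannot contain it
    lines = lines[:len(reference_lines)]
    for i, line in enumerate(lines):
        lines[i], found = DATE_RE.subn('YEAR', line)
        if found:
            break             # only the first dated line needs normalising
    return lines == reference_lines

reference = ["# Copyright YEAR The Kubernetes Authors.",
             "# Licensed under the Apache License, Version 2.0"]
sample = ("# Copyright 2015 The Kubernetes Authors.\n"
          "# Licensed under the Apache License, Version 2.0\n"
          "print('hello')")
assert header_matches(sample, reference)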
import json from coalib.bearlib import deprecate_settings from coalib.bearlib.abstractions.Linter import linter from dependency_management.requirements.NpmRequirement import NpmRequirement from coala_utils.param_conversion import negate def bool_or_str(value): try: return bool(value) except: return str(value) def bool_or_int(value): try: return bool(value) except: return int(value) @linter(executable='jshint', output_format='regex', output_regex=r'.+?: line (?P<line>\d+), col (?P<column>\d+), ' r'(?P<message>.+) \((?P<severity>[EWI])\d+\)') class JSHintBear: """ Detect errors and potential problems in JavaScript code and to enforce appropriate coding conventions. For example, problems like syntax errors, bugs due to implicit type conversion, leaking variables and much more can be detected. For more information on the analysis visit <http://jshint.com/> """ LANGUAGES = {'JavaScript'} REQUIREMENTS = {NpmRequirement('jshint', '2')} AUTHORS = {'The coala developers'} AUTHORS_EMAILS = {'coala-devel@googlegroups.com'} LICENSE = 'AGPL-3.0' CAN_DETECT = {'Formatting', 'Syntax', 'Complexity', 'Unused Code'} @staticmethod @deprecate_settings(es_version='use_es6_syntax', javascript_strictness=( 'allow_global_strict', lambda x: 'global' if x else True), cyclomatic_complexity='maxcomplexity', allow_unused_variables=('prohibit_unused', negate), max_parameters='maxparams', allow_missing_semicolon='allow_missing_semicol', allow_this_statements='allow_this_stmt', allow_with_statements='allow_with_stmt', allow_bitwise_operators=('prohibit_bitwise', negate), max_statements='maxstatements', max_depth='maxdepth', allow_comma_operator=('prohibit_comma', negate), allow_non_breaking_whitespace=( 'prohibit_non_breaking_whitespace', negate), allow_prototype_overwrite=( 'prohibit_prototype_overwrite', negate), allow_type_coercion=('prohibit_type_coercion', negate), allow_future_identifiers=('future_hostile', negate), allow_typeof=('prohibit_typeof', negate), allow_var_statement=( 'prohibit_variable_statements', negate), allow_grouping_operator=('prohibit_groups', negate), allow_variable_shadowing='shadow', use_mozilla_extension='using_mozilla', allow_constructor_functions=('prohibit_new', negate), allow_argument_caller_and_callee=( 'prohibit_arg', negate), allow_iterator_property=('iterator', negate), allow_filter_in_forin='force_filter_forin') def generate_config(filename, file, allow_bitwise_operators: bool=False, allow_prototype_overwrite: bool=False, force_braces: bool=True, allow_type_coercion: bool=False, allow_future_identifiers: bool=True, allow_typeof: bool=True, allow_filter_in_forin: bool=True, allow_funcscope: bool=False, allow_iterator_property: bool=True, allow_argument_caller_and_callee: bool=False, allow_comma_operator: bool=True, allow_non_breaking_whitespace: bool=False, allow_constructor_functions: bool=True, allow_grouping_operator: bool=True, allow_var_statement: bool=True, allow_missing_semicolon: bool=False, allow_debugger: bool=False, allow_assignment_comparisions: bool=False, allow_eval: bool=False, allow_increment: bool=False, allow_proto: bool=False, allow_scripturls: bool=False, allow_singleton: bool=False, allow_this_statements: bool=False, allow_with_statements: bool=False, use_mozilla_extension: bool=False, javascript_strictness: bool_or_str=True, allow_noyield: bool=False, allow_eqnull: bool=False, allow_last_semicolon: bool=False, allow_func_in_loop: bool=False, allow_expr_in_assignments: bool=False, use_es3_array: bool=False, environment_mootools: bool=False, environment_couch: 
bool=False, environment_jasmine: bool=False, environment_jquery: bool=False, environment_node: bool=False, environment_qunit: bool=False, environment_rhino: bool=False, environment_shelljs: bool=False, environment_prototypejs: bool=False, environment_yui: bool=False, environment_mocha: bool=True, environment_module: bool=False, environment_wsh: bool=False, environment_worker: bool=False, environment_nonstandard: bool=False, environment_browser: bool=True, environment_browserify: bool=False, environment_devel: bool=True, environment_dojo: bool=False, environment_typed: bool=False, environment_phantom: bool=False, max_statements: bool_or_int=False, max_depth: bool_or_int=False, max_parameters: bool_or_int=False, cyclomatic_complexity: bool_or_int=False, allow_variable_shadowing: bool_or_str=False, allow_unused_variables: bool_or_str=False, allow_latedef: bool_or_str=False, es_version: bool_or_int=5, jshint_config: str=''): """ :param allow_bitwise_operators: Allows the use of bitwise operators. :param allow_prototype_overwrite: This options allows overwriting prototypes of native objects such as ``Array``. :param force_braces: This option requires you to always put curly braces around blocks in loops and conditionals. :param allow_type_coercion: This options allows the use of ``==`` and ``!=``. :param allow_future_identifiers: This option allows the use of identifiers which are defined in future versions of JavaScript. :param allow_typeof: This option enables warnings about invalid ``typeof`` operator values. :param allow_filter_in_forin: This option requires all ``for in`` loops to filter object's items. :param allow_iterator_property: This option suppresses warnings about the ``__iterator__`` property. :param allow_funcscope: This option suppresses warnings about declaring variables inside of control structures while accessing them later from outside. :param allow_argument_caller_and_callee: This option allows the use of ``arguments.caller`` and ``arguments.callee``. :param allow_comma_operator: This option allows the use of the comma operator. :param allow_non_breaking_whitespace: Allows "non-breaking whitespace characters". :param allow_constructor_functions: Allows the use of constructor functions. :param allow_grouping_operator: This option allows the use of the grouping operator when it is not strictly required. :param allow_var_statement: Allows the use of the ``var`` statement while declaring a variable. Should use ``let`` or ``const`` while it is set to ``False``. :param allow_missing_semicolon: This option suppresses warnings about missing semicolons. :param allow_debugger: This option suppresses warnings about the ``debugger`` statements. :param allow_assignment_comparisions: This option suppresses warnings about the use of assignments in cases where comparisons are expected. :param allow_eval: This options suppresses warnings about the use of ``eval`` function. :param allow_increment: This option suppresses warnings about the use of unary increment and decrement operators. :param allow_proto: This option suppresses warnings about the ``__proto__`` property. :param allow_scripturls: This option suppresses warnings about the use of script-targeted URLs. :param allow_singleton: This option suppresses warnings about constructions like ``new function () { ... }`` and ``new Object;`` sometimes used to produce singletons. 
:param allow_this_statements: This option suppresses warnings about possible strict violations when the code is running in strict mode and ``this`` is used in a non-constructor function. :param allow_with_statements: This option suppresses warnings about the use of the ``with`` statement. :param use_mozilla_extension: This options tells JSHint that your code uses Mozilla JavaScript extensions. :param javascript_strictness: Determines what sort of strictness to use in the JavaScript code. The possible options are: - "global" - there must be a ``"use strict";`` at global level - "implied" - lint the code as if there is a ``"use strict";`` - "False" - disable warnings about strict mode - "True" - there must be a ``"use strict";`` at function level :param allow_noyield: This option suppresses warnings about generator functions with no ``yield`` statement in them. :param allow_eqnull: This option suppresses warnings about ``== null`` comparisons. :param allow_last_semicolon: This option suppresses warnings about missing semicolons for the last statement. :param allow_func_in_loop: This option suppresses warnings about functions inside of loops. :param allow_expr_in_assignments: This option suppresses warnings about the use of expressions where normally assignments or function calls are expected. :param use_es3_array: This option tells JSHintBear ES3 array elision elements, or empty elements are used. :param environment_mootools: This option defines globals exposed by the Mootools. :param environment_couch: This option defines globals exposed by CouchDB. :param environment_jasmine: This option defines globals exposed by Jasmine. :param environment_jquery: This option defines globals exposed by Jquery. :param environment_node: This option defines globals exposed by Node. :param environment_qunit: This option defines globals exposed by Qunit. :param environment_rhino: This option defines globals exposed when the code is running inside rhino runtime environment. :param environment_shelljs: This option defines globals exposed by the ShellJS. :param environment_prototypejs: This option defines globals exposed by the Prototype. :param environment_yui: This option defines globals exposed by the YUI JavaScript Framework. :param environment_mocha: This option defines globals exposed by the "BDD" and "TDD" UIs of the Mocha unit testing framework. :param environment_module: This option informs JSHintBear that the input code describes an ECMAScript 6 module. :param environment_wsh: This option defines globals available when the code is running as a script for the Windows Script Host. :param environment_worker: This option defines globals available when the code is running inside of a Web Worker. :param environment_nonstandard: This option defines non- standard but widely adopted globals such as ``escape`` and ``unescape``. :param environment_browser: This option defines globals exposed by modern browsers. :param environment_browserify: This option defines globals available when using the Browserify. :param environment_devel: This option defines globals that are usually used for debugging: ``console``, ``alert``, etc. :param environment_dojo: This option defines globals exposed by the Dojo Toolkit. :param environment_typed: This option defines globals for typed array constructors. :param environment_phantom: This option defines globals available when your core is running inside of the PhantomJS runtime environment. :param max_statements: Maximum number of statements allowed per function. 
:param max_depth: This option lets you control how nested do you want your blocks to be. :param max_parameters: Maximum number of parameters allowed per function. :param cyclomatic_complexity: Maximum cyclomatic complexity in the code. :param allow_variable_shadowing: This option suppresses warnings about variable shadowing i.e. declaring a variable that had been already declared somewhere in the outer scope. - "inner" - check for variables defined in the same scope only - "outer" - check for variables defined in outer scopes as well - False - same as inner - True - allow variable shadowing :param allow_unused_variables: Allows when variables are defined but never used. This can be set to ""vars"" to only check for variables, not function parameters, or ""strict"" to check all variables and parameters. :param allow_latedef: This option allows the use of a variable before it was defined. Setting this option to "nofunc" will allow function declarations to be ignored. :param es_version: This option is used to specify the ECMAScript version to which the code must adhere to. """ # Assume that when es_version is bool, it is intended for the # deprecated use_es6_version if es_version is True: es_version = 6 elif es_version is False: es_version = 5 if not jshint_config: options = {'bitwise': not allow_bitwise_operators, 'freeze': not allow_prototype_overwrite, 'curly': force_braces, 'eqeqeq': not allow_type_coercion, 'futurehostile': not allow_future_identifiers, 'notypeof': not allow_typeof, 'forin': allow_filter_in_forin, 'funcscope': allow_funcscope, 'iterator': not allow_iterator_property, 'noarg': not allow_argument_caller_and_callee, 'nocomma': not allow_comma_operator, 'nonbsp': not allow_non_breaking_whitespace, 'nonew': not allow_constructor_functions, 'undef': True, 'singleGroups': not allow_grouping_operator, 'varstmt': not allow_var_statement, 'asi': allow_missing_semicolon, 'debug': allow_debugger, 'boss': allow_assignment_comparisions, 'evil': allow_eval, 'strict': javascript_strictness, 'plusplus': allow_increment, 'proto': allow_proto, 'scripturl': allow_scripturls, 'supernew': allow_singleton, 'validthis': allow_this_statements, 'withstmt': allow_with_statements, 'moz': use_mozilla_extension, 'noyield': allow_noyield, 'eqnull': allow_eqnull, 'lastsemic': allow_last_semicolon, 'loopfunc': allow_func_in_loop, 'expr': allow_expr_in_assignments, 'elision': use_es3_array, 'mootools': environment_mootools, 'couch': environment_couch, 'jasmine': environment_jasmine, 'jquery': environment_jquery, 'node': environment_node, 'qunit': environment_qunit, 'rhino': environment_rhino, 'shelljs': environment_shelljs, 'prototypejs': environment_prototypejs, 'yui': environment_yui, 'mocha': environment_mocha, 'module': environment_module, 'wsh': environment_wsh, 'worker': environment_worker, 'nonstandard': environment_nonstandard, 'browser': environment_browser, 'browserify': environment_browserify, 'devel': environment_devel, 'dojo': environment_dojo, 'typed': environment_typed, 'phantom': environment_phantom, 'maxerr': 99999, 'maxcomplexity': cyclomatic_complexity, 'maxdepth': max_depth, 'maxparams': max_parameters, 'maxstatements': max_statements, 'shadow': allow_variable_shadowing, 'unused': not allow_unused_variables, 'latedef': allow_latedef, 'esversion': es_version} return json.dumps(options) else: return None @staticmethod def create_arguments(filename, file, config_file, jshint_config: str=''): """ :param jshint_config: The location of the jshintrc config file. 
If this option is present all the above options are not used. Instead the .jshintrc file is used as the configuration file. """ args = ('--verbose', filename, '--config') if jshint_config: args += (jshint_config,) else: args += (config_file,) return args
vijeth-aradhya/coala-bears
bears/js/JSHintBear.py
Python
agpl-3.0
20,197
[ "VisIt" ]
cedba6cb206d8a4c143451b11d5b45c1433505920d681b0788fc9ae33a5972eb
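JSHintBear's generate_config() above serializes its options into JSON, and create_arguments() hands that file to the jshint executable as --verbose <file> --config <config>. A minimal sketch of driving jshint the same way outside coala follows; it assumes the jshint executable is installed and that app.js exists, and the option subset shown is illustrative:

# Minimal sketch of generating a JSHint config and invoking the CLI with the
# same argument order as JSHintBear.create_arguments(). 'app.js' and the
# option values are illustrative assumptions.
import json
import subprocess
import tempfile

options = {
    'bitwise': True,      # disallow bitwise operators (allow_bitwise_operators=False)
    'curly': True,        # require braces around blocks
    'esversion': 6,       # lint as ES6
    'maxerr': 99999,
}

with tempfile.NamedTemporaryFile('w', suffix='.json', delete=False) as config_file:
    json.dump(options, config_file)
    config_path = config_file.name

# Argument order mirrors create_arguments(): --verbose, file, --config, path.
subprocess.call(['jshint', '--verbose', 'app.js', '--config', config_path])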
#!/usr/bin/python2.7 # encoding: utf-8 from __future__ import division from itertools import groupby from operator import itemgetter import datetime # Parallel computing import multiprocessing as mp #Local import from pyseidon.utilities.regioner import * from pyseidon.utilities.miscellaneous import time_to_index from pyseidon.utilities.miscellaneous import mattime_to_datetime class _load_var: """ **'Variables' subset in FVCOM class contains the numpy arrays** Some variables are directly passed on from FVCOM output: :: _el = elevation (m), 2D array (ntime, nnode) |_julianTime = julian date, 1D array (ntime) |_matlabTime = matlab time, 1D array (ntime) |_tauc = bottom shear stress (m2/s2), | 2D array (ntime, nele) |_ua = depth averaged u velocity component (m/s), | 2D array (ntime, nele) |_va = depth averaged v velocity component (m/s), FVCOM.Variables._| 2D array (ntime, nele) |_u = u velocity component (m/s), | 3D array (ntime, nlevel, nele) |_v = v velocity component (m/s), | 3D array (ntime, nlevel, nele) |_w = w velocity component (m/s), | 3D array (ntime, nlevel, nele) Some others shall be generated as methods are being called, ex: :: ... |_hori_velo_norm = horizontal velocity norm (m/s), | 2D array (ntime, nele) |_velo_norm = velocity norm (m/s), | 3D array (ntime, nlevel, nele) |_verti_shear = vertical shear (1/s), | 3D array (ntime, nlevel, nele) |_vorticity... """ def __init__(self, data, grid, tx, History, debug=False): self._debug = debug self._3D = False self._opendap = type(data.variables).__name__=='DatasetType' # Pointer to History setattr(self, '_History', History) # Parallel computing attributs #self._cpus = mp.cpu_count() #List of keywords kwl2D = ['ua', 'va', 'zeta','depth_av_flow_dir', 'hori_velo_norm', 'depth_av_vorticity', 'depth_av_power_density', 'depth_av_power_assessment', 'tauc'] kwl3D = ['ww', 'u', 'v', 'gls', 'tke', 'flow_dir', 'velo_norm', 'verti_shear', 'vorticity', 'power_density'] #List of aliaSes al2D = ['ua', 'va', 'el','depth_av_flow_dir', 'hori_velo_norm', 'depth_av_vorticity', 'depth_av_power_density', 'depth_av_power_assessment', 'tauc'] al3D = ['w', 'u', 'v', 'gls', 'tke', 'flow_dir', 'velo_norm', 'verti_shear', 'vorticity', 'power_density'] # Figure out which quantity to treat self._kwl2D = [] self._al2D = [] for key, aliaS in zip(kwl2D, al2D): if key in data.variables.keys(): self._kwl2D.append(key) self._al2D.append(aliaS) else: if debug: print key, " is missing !" self._kwl3D = [] self._al3D = [] for key, aliaS in zip(kwl3D, al3D): if key in data.variables.keys(): self._kwl3D.append(key) self._al3D.append(aliaS) else: if debug: print key, " is missing !" 
if not len(self._kwl3D)==0: self._3D = True #Loading time stamps try: self.julianTime = data.variables['time'] except KeyError: #exeception due to Save_as(netcdf) self.julianTime = data.variables['julianTime'] if tx==[]: # get time and adjust it to matlab datenum try: self.julianTime = data.variables['time'].data except (KeyError, AttributeError) as e: #exeception due to Save_as(netcdf) if e==KeyError: self.julianTime=data.variables['julianTime'] #exception for nc.dataset type data if e==AttributeError: self.julianTime = data.variables['time'][:] self.matlabTime = self.julianTime[:] + 678942.0 #-Append message to History field start = mattime_to_datetime(self.matlabTime[0]) end = mattime_to_datetime(self.matlabTime[-1]) text = 'Full temporal domain from ' + str(start) +\ ' to ' + str(end) self._History.append(text) #Add time dimension to grid variables grid.ntime = self.julianTime.shape[0] if debug: print 'Full temporal domain' else: #Time period region_t = self._t_region(tx, debug=debug) self._region_time = region_t ts = self._region_time[0] te = self._region_time[-1] + 1 # get time and adjust it to matlab datenum self.julianTime = data.variables['time'][ts:te] self.matlabTime = self.julianTime + 678942.0 #-Append message to History field start = mattime_to_datetime(self.matlabTime[0]) end = mattime_to_datetime(self.matlabTime[-1]) text = 'Temporal domain from ' + str(start) +\ ' to ' + str(end) self._History.append(text) #Add time dimension to grid variables grid.ntime = self.julianTime.shape[0] if debug: print "ntime: ", grid.ntime if debug: print "region_t shape: ", region_t.shape # Define which loading function to use if grid._ax==[] and tx==[]: loadVar = self._load_full_time_full_region for key, aliaS in zip(self._kwl2D, self._al2D): loadVar(data, key, aliaS, debug=debug) for key, aliaS in zip(self._kwl3D, self._al3D): loadVar(data, key, aliaS, debug=debug) else: if grid._ax!=[] and tx!=[]: loadVar = self._load_partial_time_partial_region elif grid._ax==[] and tx!=[]: loadVar = self._load_partial_time_full_region else: loadVar = self._load_full_time_partial_region # Loading 2D variables for key, aliaS in zip(self._kwl2D, self._al2D): loadVar(data, grid, key, aliaS, debug=debug) # Loading 3D variables for key, aliaS in zip(self._kwl3D, self._al3D): loadVar(data, grid, key, aliaS, debug=debug) ##-------Parallelized loading block------- # if debug: startT = time.time() # # divisor = len(self._kwl2D)//self._cpus # remainder = len(self._kwl2D)%self._cpus # # if debug: print "Parallel loading 2D vars..." # if debug: start2D = time.time() # # for i in range(divisor): # start = self._cpus * i # end = start + (self._cpus-1) # processes = [mp.Process(target=loadVar, args=(data, grid, key, aliaS, debug))\ # for key, aliaS in zip(self._kwl2D[start:end], self._al2D[start:end])] # # Run processes # for p in processes: # p.start() # # Exit the completed processes # for p in processes: # p.join() # # # Remaining vars # if remainder != 0: # start = int(-1 * remainder) # processes = [mp.Process(target=loadVar, args=(data, grid, key, aliaS, debug))\ # for key, aliaS in zip(self._kwl2D[start:], self._al2D[start:])] # # Run processes # for p in processes: # p.start() # # Exit the completed processes # for p in processes: # p.join() # # if debug: end2D = time.time() # if debug: print "...processing time: ", (end2D - start2D) # # if debug: print "Parallel loading 3D vars..." 
# if debug: start3D = time.time() # # for i in range(divisor): # start = self._cpus * i # end = start + (self._cpus-1) # processes = [mp.Process(target=loadVar, args=(data, grid,key, aliaS, debug))\ # for key, aliaS in zip(self._kwl3D[start:end], self._al3D[start:end])] # # Run processes # for p in processes: # p.start() # # Exit the completed processes # for p in processes: # p.join() # # # Remaining vars # if remainder != 0: # start = int(-1 * remainder) # processes = [mp.Process(target=loadVar, args=(data, grid, key, aliaS, debug))\ # for key, aliaS in zip(self._kwl3D[start:], self._al3D[start:])] # # Run processes # for p in processes: # p.start() # # Exit the completed processes # for p in processes: # p.join() # # if debug: end3D = time.time() # if debug: endT = time.time() # if debug: print "...-loading 3D- processing time: ", (end3D - start3D) # if debug: print "...-loading 2D & 3D- processing time: ", (endT - startT) # #-------end------- if debug: print '...Passed' return def _load_full_time_full_region(self, data, key, aliaS, debug=False): """ loading variables for full time and space domains Inputs: - key = FVCOM variable name, str - aliaS = PySeidon variable alias, str Options: - debug = debug flag, boolean """ if debug: print "loading " + str(aliaS) +"..." try: setattr(self, aliaS, data.variables[key].data) except AttributeError: #exeception due nc.Dataset setattr(self, aliaS, data.variables[key]) def _load_partial_time_partial_region(self, data, grid, key, aliaS, debug=False): """ loading variables for partial time and space domains Inputs: - key = FVCOM variable name, str - aliaS = PySeidon variable alias, str Options: - debug = debug flag, boolean """ if debug: print "loading " + str(aliaS) +"..." # define time bounds ts = self._region_time[0] te = self._region_time[-1] + 1 if key == 'zeta': region = grid._node_index horiDim = grid.nnode else: region = grid._element_index horiDim = grid.nele if key == 'verti_shear': vertiDim = grid.nlevel-1 else: vertiDim = grid.nlevel # Find out if using netCDF4 or scipy try: Test = data.variables[key].data self._scipynetcdf = True except AttributeError: # exeception due nc.Dataset self._scipynetcdf = False if self._opendap: # loop over contiguous indexes for opendap H = 0 #local counter for k, g in groupby(enumerate(region), lambda (i,x):i-x): ID = map(itemgetter(1), g) #if debug: print 'Index bound: ' + str(ID[0]) + '-' + str(ID[-1]+1) if key in self._kwl2D: if self._scipynetcdf: #TR : Don't I need to transpose here? var = data.variables[key].data[ts:te,ID[0]:(ID[-1]+1)] else: var = data.variables[key][ts:te,ID[0]:(ID[-1]+1)] if H == 0: setattr(self, aliaS,var) H = 1 else: setattr(self, aliaS, np.hstack((getattr(self, aliaS), var))) else: if self._scipynetcdf: #TR : Don't I need to transpose here? 
var = data.variables[key].data[ts:te,:,ID[0]:(ID[-1]+1)] else: var = data.variables[key][ts:te,:,ID[0]:(ID[-1]+1)] if H == 0: setattr(self, aliaS,var) H = 1 else: setattr(self, aliaS, np.dstack((getattr(self, aliaS), var))) # TR comment: looping on time indices is a trick from Mitchell O'Flaherty-Sproul to improve loading time else: I = 0 if key in self._kwl2D: setattr(self, aliaS, np.zeros((grid.ntime, horiDim))) for i in self._region_time: if self._scipynetcdf: getattr(self, aliaS)[I,:] = np.transpose(data.variables[key].data[i, region]) else: getattr(self, aliaS)[I,:] = (data.variables[key][i, region]) I += 1 else: setattr(self, aliaS, np.zeros((grid.ntime, vertiDim, horiDim))) for i in self._region_time: if self._scipynetcdf: getattr(self, aliaS)[I,:,:] = np.transpose(data.variables[key].data[i, :, region]) else: getattr(self, aliaS)[I,:,:] = (data.variables[key][i, :, region]) I += 1 def _load_full_time_partial_region(self, data, grid, key, aliaS, debug=False): """ loading variables for full time domain and partial space domain Inputs: - key = FVCOM variable name, str - aliaS = PySeidon variable alias, str Options: - debug = debug flag, boolean """ if debug: print "loading " + str(aliaS) +"..." if key == 'zeta': region = grid._node_index horiDim = grid.nnode else: region = grid._element_index horiDim = grid.nele if key == 'verti_shear': vertiDim = grid.nlevel-1 else: vertiDim = grid.nlevel # Find out if using netCDF4 or scipy try: Test = data.variables[key].data self._scipynetcdf = True except AttributeError: # exeception due nc.Dataset self._scipynetcdf = False if self._opendap: # loop over contiguous indexes for opendap H = 0 #local counter for k, g in groupby(enumerate(region), lambda (i,x):i-x): ID = map(itemgetter(1), g) #if debug: print 'Index bound: ' + str(ID[0]) + '-' + str(ID[-1]+1) if key in self._kwl2D: if self._scipynetcdf: #TR : Don't I need to transpose here? var = data.variables[key].data[:,ID[0]:(ID[-1]+1)] else: var = data.variables[key][:,ID[0]:(ID[-1]+1)] if H == 0: setattr(self, aliaS,var) H = 1 else: setattr(self, aliaS, np.hstack((getattr(self, aliaS), var))) else: if self._scipynetcdf: #TR : Don't I need to transpose here? var = data.variables[key].data[:,:,ID[0]:(ID[-1]+1)] else: var = data.variables[key][:,:,ID[0]:(ID[-1]+1)] if H == 0: setattr(self, aliaS,var) H = 1 else: setattr(self, aliaS, np.dstack((getattr(self, aliaS), var))) else: # TR comment: looping on time indices is a trick from Mitchell O'Flaherty-Sproul to improve loading time if key in self._kwl2D: setattr(self, aliaS, np.zeros((grid.ntime, horiDim))) for i in range(grid.ntime): if self._scipynetcdf: getattr(self, aliaS)[i,:] = np.transpose(data.variables[key].data[i, region]) else: getattr(self, aliaS)[i,:] = (data.variables[key][i, region]) else: setattr(self, aliaS, np.zeros((grid.ntime, vertiDim, horiDim))) for i in range(grid.ntime): if self._scipynetcdf: getattr(self, aliaS)[i,:,:] = np.transpose(data.variables[key].data[i, :, region]) else: getattr(self, aliaS)[i,:,:] = (data.variables[key][i, :, region]) def _load_partial_time_full_region(self, data, grid, key, aliaS, debug=False): """ loading variables for partial time domain and full space domain Inputs: - key = FVCOM variable name, str - aliaS = PySeidon variable alias, str Options: - debug = debug flag, boolean """ if debug: print "loading " + str(aliaS) +"..." 
# define time bounds ts = self._region_time[0] te = self._region_time[-1] + 1 if key == 'zeta': horiDim = grid.nnode else: horiDim = grid.nele if key == 'verti_shear': vertiDim = grid.nlevel-1 else: vertiDim = grid.nlevel # Find out if using netCDF4 or scipy try: Test = data.variables[key].data self._scipynetcdf = True except AttributeError: # exeception due nc.Dataset self._scipynetcdf = False if self._opendap: if key in self._kwl2D: if self._scipynetcdf: var = data.variables[key].data[ts:te,:] else: var = data.variables[key][ts:te,:] else: if self._scipynetcdf: var = data.variables[key].data[ts:te,:,:] else: var = data.variables[key][ts:te,:,:] setattr(self, aliaS,var) else: I = 0 # TR comment: looping on time indices is a trick from Mitchell O'Flaherty-Sproul to improve loading time if key in self._kwl2D: setattr(self, aliaS, np.zeros((grid.ntime, horiDim))) for i in self._region_time: if self._scipynetcdf: getattr(self, aliaS)[I,:] = np.transpose(data.variables[key].data[i, :]) else: getattr(self, aliaS)[I,:] = (data.variables[key][i, :]) I += 1 else: setattr(self, aliaS, np.zeros((grid.ntime, vertiDim, horiDim))) for i in self._region_time: if self._scipynetcdf: getattr(self, aliaS)[I,:,:] = np.transpose(data.variables[key].data[i, :, :]) else: getattr(self, aliaS)[I,:,:] = (data.variables[key][i, :, :]) I += 1 def _t_region(self, tx, debug=False): """Return time indices included in time period, aka tx""" debug = debug or self._debug if debug: print 'Computing region_t...' start = datetime.datetime.strptime(tx[0], '%Y-%m-%d %H:%M:%S') end = datetime.datetime.strptime(tx[1], '%Y-%m-%d %H:%M:%S') region_t = time_to_index(start, end, self.julianTime[:], debug=debug) if debug: print '...Passed' print '-Now working in time box-' return region_t class _load_grid: """ **'Grid' subset in FVCOM class contains grid related quantities** Some grid data are directly passed on from FVCOM output: :: _lon = longitudes at nodes (deg.), 2D array (ntime, nnode) |_lonc = longitudes at elements (deg.), 2D array (ntime, nele) |_lat = latitudes at nodes (deg.), 2D array (ntime, nnode) |_latc = latitudes at elements (deg.), 2D array (ntime, nele) |_x = x coordinates at nodes (m), 2D array (ntime, nnode) |_xc = x coordinates at elements (m), 2D array (ntime, nele) |_y = y coordinates at nodes (m), 2D array (ntime, nnode) |_yc = y coordinates at nodes (m), 2D array (ntime, nele) FVCOM.Grid._|_h = bathymetry (m), 2D array (ntime, nnode) |_nele = element dimension, integer |_nnode = node dimension, integer |_nlevel = vertical level dimension, integer |_ntime = time dimension, integer |_trinodes = surrounding node indices, 2D array (3, nele) |_triele = surrounding element indices, 2D array (3, nele) |_siglay = sigma layers, 2D array (nlevel, nnode) |_siglay = sigma levels, 2D array (nlevel+1, nnode) |_and a all bunch of grid parameters... | i.e. a1u, a2u, aw0, awx, awy Some others shall be generated as methods are being called, ex: :: ... |_triangle = triangulation object for plotting purposes """ def __init__(self, data, ax, History, debug=False): self._debug = debug if debug: print 'Loading grid...' 
#Pointer to History setattr(self, '_History', History) #list of grid variable gridvar = ['lon','lat','lonc','latc','x','y','xc','yc', 'a1u','a2u','aw0','awx','awy'] for key in gridvar: try: setattr(self, key, data.variables[key].data) except AttributeError: #exception for nc.dataset type data setattr(self, key, data.variables[key])#[:]) #special treatment for triele & trinodes due to Save_as(netcdf) datavar = data.variables.keys() if "trinodes" in datavar: try: setattr(self, 'trinodes', data.variables['trinodes'].data) except AttributeError: #exception for nc.dataset type data setattr(self, 'trinodes', data.variables['trinodes'])#[:]) else: try: self.trinodes = np.transpose(data.variables['nv'].data) - 1 except AttributeError: #exception for nc.dataset type data self.trinodes = np.transpose(data.variables['nv'][:]) - 1 if "triele" in datavar: try: setattr(self, 'triele', data.variables['triele'].data) except AttributeError: #exception for nc.dataset type data setattr(self, 'triele', data.variables['triele'])#[:]) else: try: self.triele = np.transpose(data.variables['nbe'].data) - 1 except AttributeError: #exception for nc.dataset type data self.triele = np.transpose(data.variables['nbe'][:]) - 1 #special treatment for depth2D & depth due to Save_as(netcdf) if "depth2D" in datavar: setattr(self, "depth2D", data.variables["depth2D"])#[:]) if "depth" in datavar: setattr(self, "depth", data.variables["depth"])#[:]) if ax==[]: #Define bounding box self._ax = [] #Append message to History field text = 'Full spatial domain' self._History.append(text) #Define the rest of the grid variables self.h = data.variables['h'][:] self.siglay = data.variables['siglay'][:] self.siglev = data.variables['siglev'][:] self.nlevel = self.siglay.shape[0] self.nele = self.lonc.shape[0] self.nnode = self.lon.shape[0] else: #Checking for pre-defined regions if ax=='GP': ax=[-66.36, -66.31, 44.24, 44.3] elif ax=='PP': ax=[-66.23, -66.19, 44.37, 44.41] elif ax=='DG': ax=[-65.84, -65.73, 44.64, 44.72] elif ax=='MP': ax=[-65.5, -63.3, 45.0, 46.0] print 'Re-indexing may take some time...' 
Data = regioner(self, ax, debug=debug) #list of grid variable gridvar = ['lon','lat','lonc','latc','x','y','xc','yc', 'a1u','a2u','aw0','awx','awy','nv','nbe'] for key in gridvar: setattr(self, key, Data[key][:]) #Special treatment here self.trinodes = Data['nv'][:] self.triele = Data['nbe'][:] self.triangle = Data['triangle'] #Only load the element within the box self._node_index = Data['node_index'] self._element_index = Data['element_index'] #different loading technique if using OpenDap server if type(data.variables).__name__=='DatasetType': #Split into consecutive integers to optimise loading #TR comment: data.variables['ww'].data[:,:,region_n] doesn't # work with non consecutive indices H=0 for k, g in groupby(enumerate(self._node_index), lambda (i,x):i-x): ID = map(itemgetter(1), g) #if debug: print 'Index bound: ' + str(ID[0]) + '-' + str(ID[-1]+1) if H==0: try: self.h = data.variables['h'].data[ID[0]:(ID[-1]+1)] self.siglay = data.variables['siglay'].data[:,ID[0]:(ID[-1]+1)] self.siglev = data.variables['siglev'].data[:,ID[0]:(ID[-1]+1)] except AttributeError: #exception for nc.dataset type data self.h = data.variables['h'][ID[0]:(ID[-1]+1)] self.siglay = data.variables['siglay'][:,ID[0]:(ID[-1]+1)] self.siglev = data.variables['siglev'][:,ID[0]:(ID[-1]+1)] else: try: self.h = np.hstack((self.h, data.variables['h'].data[ID[0]:(ID[-1]+1)])) self.siglay = np.hstack((self.siglay, data.variables['siglay'].data[:,ID[0]:(ID[-1]+1)])) self.siglev = np.hstack((self.siglev, data.variables['siglev'].data[:,ID[0]:(ID[-1]+1)])) except AttributeError: #exception for nc.dataset type data self.h = np.hstack((self.h, data.variables['h'][ID[0]:(ID[-1]+1)])) self.siglay = np.hstack((self.siglay, data.variables['siglay'][:,ID[0]:(ID[-1]+1)])) self.siglev = np.hstack((self.siglev, data.variables['siglev'][:,ID[0]:(ID[-1]+1)])) H=1 else: try: self.h = data.variables['h'].data[self._node_index] self.siglay = data.variables['siglay'].data[:,self._node_index] self.siglev = data.variables['siglev'].data[:,self._node_index] except AttributeError: #exception for nc.dataset type data self.h = data.variables['h'][self._node_index] self.siglay = data.variables['siglay'][:,self._node_index] self.siglev = data.variables['siglev'][:,self._node_index] #Dimensions self.nlevel = self.siglay.shape[0] self.nele = Data['element_index'].shape[0] self.nnode = Data['node_index'].shape[0] del Data #Define bounding box self._ax = ax # Add metadata entry text = 'Bounding box =' + str(ax) self._History.append(text) print '-Now working in bounding box-' if debug: print '...Passed' return
TheKingInYellow/PySeidon
pyseidon/fvcomClass/variablesFvcom.py
Python
agpl-3.0
28,664
[ "NetCDF" ]
4aacc854f99d26cacd2d2393097ddc75318c3b89e3e1dae5d0297a86f62da586
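The partial-region loaders in the PySeidon file above repeatedly rely on an itertools.groupby idiom (written with a Python 2 tuple-parameter lambda, lambda (i,x): i-x) to split a sorted index array into contiguous runs, so that OpenDAP requests can be issued as consecutive slices and then stacked with np.hstack/np.dstack. A minimal Python 3 sketch of that pattern follows; the contiguous_runs helper and the sample region array are illustrative and not part of the original code.

import numpy as np
from itertools import groupby
from operator import itemgetter

def contiguous_runs(indices):
    """Yield (start, stop) slice bounds for each run of consecutive integers.

    Python 3 rewrite of the idiom used above,
        groupby(enumerate(region), lambda (i, x): i - x)
    since tuple-unpacking lambdas are no longer valid syntax.
    """
    for _, group in groupby(enumerate(indices), key=lambda pair: pair[1] - pair[0]):
        run = list(map(itemgetter(1), group))
        yield run[0], run[-1] + 1   # half-open bounds, usable as array slices

# hypothetical node indices of a sub-region (not taken from the original file)
region = np.array([3, 4, 5, 9, 10, 42])
print(list(contiguous_runs(region)))   # [(3, 6), (9, 11), (42, 43)]

# A partial-region variable could then be assembled slice by slice, e.g.:
# blocks = [data.variables['zeta'][ts:te, start:stop] for start, stop in contiguous_runs(region)]
# zeta = np.hstack(blocks)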
#!/usr/bin/env python3 #* This file is part of the MOOSE framework #* https://www.mooseframework.org #* #* All rights reserved, see COPYRIGHT for full restrictions #* https://github.com/idaholab/moose/blob/master/COPYRIGHT #* #* Licensed under LGPL 2.1, please see LICENSE for details #* https://www.gnu.org/licenses/lgpl-2.1.html import sys import unittest from PyQt5 import QtWidgets, QtGui, QtCore from peacock.ExodusViewer.plugins.BackgroundPlugin import main from peacock.utils import Testing, qtutils from mooseutils import message class TestBackgroundPlugin(Testing.PeacockImageTestCase): """ Testing for MeshControl widget. """ qapp = QtWidgets.QApplication(sys.argv) def setUp(self): """ Creates a window attached to FilePlugin widget. """ message.MOOSE_TESTING_MODE = True qtutils.setAppInformation("peacock_backgroundplugin") settings = QtCore.QSettings() settings.clear() settings.sync() def createWidget(self): # The file to open self._filename = Testing.get_chigger_input('mug_blocks_out.e') self._widget, self._window, self._main = main(size=[600,600]) self._window.onSetFilename(self._filename) self._window.onSetVariable('diffused') self._window.onWindowRequiresUpdate() def testInitial(self): """ Test the initial state of the widget. """ self.createWidget() bottom = self._window._window[0].getVTKRenderer().GetBackground() bottom_exact = 0.7058823529411765 for i in range(3): self.assertAlmostEqual(bottom[i], bottom_exact) top = self._window._window[0].getVTKRenderer().GetBackground2() top_exact = 0.43529411764705883 for i in range(3): self.assertAlmostEqual(top[i], top_exact) self.assertImage('testInitial.png') def testChangeTop(self): """ Test changing top color. """ self.createWidget() self._widget.BackgroundPlugin._top = QtGui.QColor(0,255,0) self._widget.BackgroundPlugin.updateOptions() self._widget.BackgroundPlugin.windowRequiresUpdate.emit() self.assertImage('testTopColor.png') def testChangeBottom(self): """ Test changing bottom color. """ self.createWidget() self._widget.BackgroundPlugin._bottom = QtGui.QColor(0,0,255) self._widget.BackgroundPlugin.updateOptions() self._widget.BackgroundPlugin.windowRequiresUpdate.emit() self.assertImage('testBottomColor.png') def testSolidColor(self): """ Test gradient toggle. 
""" self.createWidget() self._widget.BackgroundPlugin.GradientToggle.setChecked(False) self._widget.BackgroundPlugin.GradientToggle.toggled.emit(False) self._widget.BackgroundPlugin._solid = QtGui.QColor(255,0,0) self._widget.BackgroundPlugin.updateOptions() self._widget.BackgroundPlugin.windowRequiresUpdate.emit() self.assertImage('testSolidColor.png') def testTopColorPrefs(self): """ Test that the preferences work """ settings = QtCore.QSettings() settings.setValue("exodus/gradientTopColor", QtGui.QColor(0, 255, 0).name()) settings.sync() self.createWidget() self.assertImage('testTopColor.png') def testBottomColorPrefs(self): """ Test that the preferences work """ settings = QtCore.QSettings() settings.setValue("exodus/gradientBottomColor", QtGui.QColor(0, 0, 255).name()) settings.sync() self.createWidget() self.assertImage('testBottomColor.png') def testSolidColorPrefs(self): """ Test that the preferences work """ settings = QtCore.QSettings() settings.setValue("exodus/solidBackgroundColor", QtGui.QColor(255, 0, 0).name()) settings.setValue("exodus/backgroundGradient", False) settings.sync() self.createWidget() self._widget.BackgroundPlugin.GradientToggle.setChecked(False) self._widget.BackgroundPlugin.GradientToggle.toggled.emit(False) self._widget.BackgroundPlugin.updateOptions() self._widget.BackgroundPlugin.windowRequiresUpdate.emit() self.assertImage('testSolidColor.png') def testPreset(self): self.createWidget() self._widget.BackgroundPlugin.BlackPreset.setChecked(True) self._widget.BackgroundPlugin.BlackPreset.toggled.emit(True) self.assertImage('testBlackToggle.png') self._widget.BackgroundPlugin.WhitePreset.setChecked(True) self._widget.BackgroundPlugin.WhitePreset.toggled.emit(True) self.assertImage('testWhiteToggle.png') self._widget.BackgroundPlugin.WhitePreset.setChecked(False) self._widget.BackgroundPlugin.WhitePreset.toggled.emit(False) self.assertImage('testInitial.png') def testBlackFontToggle(self): self.createWidget() self._widget.BackgroundPlugin.ColorbarBlackFontToggle.setChecked(True) self._widget.BackgroundPlugin.ColorbarBlackFontToggle.toggled.emit(True) self.assertImage('testBlackFont.png') if __name__ == '__main__': unittest.main(module=__name__, verbosity=2)
nuclear-wizard/moose
python/peacock/tests/exodus_tab/test_BackgroundPlugin.py
Python
lgpl-2.1
5,345
[ "MOOSE" ]
dd9b8a48545c5c107f3f120f452d9d218e6df62bd5fb0b550277ffefd1eec443
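The Peacock test above drives the BackgroundPlugin entirely through QSettings keys such as exodus/gradientTopColor, exodus/solidBackgroundColor and exodus/backgroundGradient, seeded before the widget is created. A minimal sketch of that preference round trip is shown below; it constructs QSettings with an explicit organization/application name (the real identity set by qtutils.setAppInformation may differ), and the stored/top variables are illustrative only.

from PyQt5 import QtCore, QtGui

# Assumed application identity; Peacock's own qtutils helper may use different names.
settings = QtCore.QSettings("peacock_backgroundplugin", "peacock_backgroundplugin")

# Colors are stored as their hex-name strings, booleans as plain values,
# matching the keys exercised by the preference tests above.
settings.setValue("exodus/gradientTopColor", QtGui.QColor(0, 255, 0).name())
settings.setValue("exodus/backgroundGradient", False)
settings.sync()

# A plugin reading the preference back reconstructs the QColor from its name.
stored = settings.value("exodus/gradientTopColor", "#000000")
top = QtGui.QColor(stored)
print(top.red(), top.green(), top.blue())   # 0 255 0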
############################################################################### # This file is part of GALARIO: # # Gpu Accelerated Library for Analysing Radio Interferometer Observations # # # # Copyright (C) 2017-2020, Marco Tazzari, Frederik Beaujean, Leonardo Testi. # # # # This program is free software: you can redistribute it and/or modify # # it under the terms of the Lesser GNU General Public License as published by # # the Free Software Foundation, either version 3 of the License, or # # (at your option) any later version. # # # # This program is distributed in the hope that it will be useful, # # but WITHOUT ANY WARRANTY; without even the implied warranty of # # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # # # # For more details see the LICENSE file. # # For documentation see https://mtazzari.github.io/galario/ # ############################################################################### #!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import (division, print_function, absolute_import, unicode_literals) import numpy as np from scipy.interpolate import interp1d, RectBivariateSpline from scipy.integrate import trapz, quadrature __all__ = ["py_sampleImage", "py_sampleProfile", "py_chi2Profile", "py_chi2Image", "radial_profile", "g_sweep_prototype", "sweep_ref", "create_reference_image", "create_sampling_points", "uv_idx", "uv_idx_r2c", "int_bilin_MT", "matrix_size", "apply_phase_array", "generate_random_vis", "unique_part", "assert_allclose", "apply_rotation"] def py_sampleImage(reference_image, dxy, udat, vdat, dRA=0., dDec=0., PA=0., origin='upper'): """ Python implementation of sampleImage. """ if origin == 'upper': v_origin = 1. elif origin == 'lower': v_origin = -1. nxy = reference_image.shape[0] dRA *= 2.*np.pi dDec *= 2.*np.pi du = 1. / (nxy*dxy) # Real to Complex transform fft_r2c_shifted = np.fft.fftshift( np.fft.rfft2( np.fft.fftshift(reference_image)), axes=0) # apply rotation cos_PA = np.cos(PA) sin_PA = np.sin(PA) urot = udat * cos_PA - vdat * sin_PA vrot = udat * sin_PA + vdat * cos_PA dRArot = dRA * cos_PA - dDec * sin_PA dDecrot = dRA * sin_PA + dDec * cos_PA # interpolation indices uroti = np.abs(urot)/du vroti = nxy/2. + v_origin * vrot/du uneg = urot < 0. vroti[uneg] = nxy/2 - v_origin * vrot[uneg]/du # coordinates of FT u_axis = np.linspace(0., nxy // 2, nxy // 2 + 1) v_axis = np.linspace(0., nxy - 1, nxy) # We use RectBivariateSpline to do only linear interpolation, which is faster # than interp2d for our case of a regular grid. # RectBivariateSpline does not work for complex input, so we need to run it twice. f_re = RectBivariateSpline(v_axis, u_axis, fft_r2c_shifted.real, kx=1, ky=1, s=0) ReInt = f_re.ev(vroti, uroti) f_im = RectBivariateSpline(v_axis, u_axis, fft_r2c_shifted.imag, kx=1, ky=1, s=0) ImInt = f_im.ev(vroti, uroti) f_amp = RectBivariateSpline(v_axis, u_axis, np.abs(fft_r2c_shifted), kx=1, ky=1, s=0) AmpInt = f_amp.ev(vroti, uroti) # correct for Real to Complex frequency mapping uneg = urot < 0. ImInt[uneg] *= -1. PhaseInt = np.angle(ReInt + 1j*ImInt) # apply the phase change theta = urot*dRArot + vrot*dDecrot vis = AmpInt * (np.cos(theta+PhaseInt) + 1j*np.sin(theta+PhaseInt)) return vis def py_sampleProfile(intensity, Rmin, dR, nxy, dxy, udat, vdat, dRA=0., dDec=0., PA=0, inc=0.): """ Python implementation of sampleProfile. 
""" inc_cos = np.cos(inc) nrad = len(intensity) gridrad = np.linspace(Rmin, Rmin + dR * (nrad - 1), nrad) ncol, nrow = nxy, nxy # create the mesh grid x = (np.linspace(0.5, -0.5 + 1./float(ncol), ncol)) * dxy * ncol y = (np.linspace(0.5, -0.5 + 1./float(nrow), nrow)) * dxy * nrow # we shrink the x axis, since PA is the angle East of North of the # the plane of the disk (orthogonal to the angular momentum axis) # PA=0 is a disk with vertical orbital node (aligned along North-South) x_axis, y_axis = np.meshgrid(x / inc_cos, y) x_meshgrid = np.sqrt(x_axis ** 2. + y_axis ** 2.) # convert to Jansky sr_to_px = dxy**2. intensity *= sr_to_px f = interp1d(gridrad, intensity, kind='linear', fill_value=0., bounds_error=False, assume_sorted=True) intensmap = f(x_meshgrid) intensmap[nrow//2, ncol//2] = central_pixel(intensity, Rmin, dR, dxy) vis = py_sampleImage(intensmap, dxy, udat, vdat, PA=PA, dRA=dRA, dDec=dDec) return vis def py_chi2Image(reference_image, dxy, udat, vdat, vis_obs_re, vis_obs_im, weights, dRA=0., dDec=0., PA=0.): """ Python implementation of chi2Image. """ vis = py_sampleImage(reference_image, dxy, udat, vdat, PA=PA, dRA=dRA, dDec=dDec) chi2 = np.sum(((vis.real - vis_obs_re)**2. + (vis.imag - vis_obs_im)**2.)*weights) return chi2 def py_chi2Profile(intensity, Rmin, dR, nxy, dxy, udat, vdat, vis_obs_re, vis_obs_im, weights, dRA=0., dDec=0., PA=0, inc=0.): """ Python implementation of chi2Profile. """ vis = py_sampleProfile(intensity, Rmin, dR, nxy, dxy, udat, vdat, inc=inc, PA=PA, dRA=dRA, dDec=dDec) chi2 = np.sum(((vis.real - vis_obs_re)**2. + (vis.imag - vis_obs_im)**2.)*weights) return chi2 def radial_profile(Rmin, delta_R, nrad, mode='Gauss', dtype='float64', gauss_width=100): """ Compute a radial brightness profile. Returns intensity in Jy/sr """ gridrad = np.linspace(Rmin, Rmin + delta_R * (nrad - 1), nrad).astype(dtype) if mode == 'Gauss': # a simple Gaussian intensity = np.exp(-(gridrad/gauss_width)**2) elif mode == 'Cos-Gauss': # a cos-tapered Gaussian intensity = np.cos(2.*np.pi*gridrad/(gauss_width))**2. * np.exp(-(gridrad/gauss_width)**2) return intensity def central_pixel(I, Rmin, dR, dxy): """ Compute brightness in the central pixel as the average flux in the pixel. """ # with quadrature method: tends to over-estimate it # area = np.pi*((dxy/2.)**2-Rmin**2) # flux, _ = quadrature(lambda z: f(z)*z, Rmin, dxy/2., tol=1.49e-25, maxiter=200) # flux *= 2.*np.pi # intensmap[int(nrow/2+Dy/dxy), int(ncol/2-Dx/dxy)] = flux/area # with trapezoidal rule: it's the same implementation as in galario.cpp iIN = int(np.floor((dxy / 2 - Rmin) // dR)) flux = 0. for i in range(1, iIN): flux += (Rmin + dR * i) * I[i] flux *= 2. flux += Rmin * I[0] + (Rmin + iIN * dR) * I[iIN] flux *= dR # add flux between Rmin+iIN*dR and dxy/2 I_interp = (I[iIN + 1] - I[iIN]) / (dR) * (dxy / 2. - (Rmin + dR * (iIN))) + \ I[iIN] # brightness at R=dxy/2 flux += ((Rmin + iIN * dR) * I[iIN] + dxy / 2. * I_interp) * ( dxy / 2. - (Rmin + iIN * dR)) # flux *= 2 * np.pi / 2. # to complete trapezoidal rule (***) area = (dxy / 2.) ** 2 - Rmin ** 2 # area *= np.pi # elides (***) return flux / area def g_sweep_prototype(I, Rmin, dR, nrow, ncol, dxy, inc, dtype_image='float64'): """ Prototype of the sweep function for galario. 
""" assert Rmin <= dxy, "Rmin must be smaller or equal than dxy" image = np.zeros((nrow, ncol), dtype=dtype_image) nrad = len(I) irow_center = nrow // 2 icol_center = ncol // 2 inc_cos = np.cos(inc) # radial extent in number of image pixels covered by the profile rmax = min(np.int(np.ceil((Rmin+nrad*dR)/dxy)), irow_center) row_offset = irow_center-rmax col_offset = icol_center-rmax for irow in range(rmax*2): for jcol in range(rmax*2): x = (rmax - jcol) * dxy y = (rmax - irow) * dxy rr = np.sqrt((x/inc_cos)**2. + (y)**2.) # interpolate 1D iR = np.int(np.floor((rr-Rmin) / dR)) if iR >= nrad-1: image[irow+row_offset, jcol+col_offset] = 0. else: image[irow+row_offset, jcol+col_offset] = I[iR] + (rr - iR * dR - Rmin) * (I[iR + 1] - I[iR]) / dR # central pixel image[irow_center, icol_center] = central_pixel(I, Rmin, dR, dxy) sr_to_px = dxy**2. image *= sr_to_px return image def sweep_ref(I, Rmin, dR, nrow, ncol, dxy, inc, Dx=0., Dy=0., dtype_image='float64', origin='upper'): """ Compute the intensity map (i.e. the image) given the radial profile I(R). We assume an axisymmetric profile. The origin of the output image is in the upper left corner. Parameters ---------- I: 1D float array Intensity radial profile I(R). Rmin : float Inner edge of the radial grid. At R=Rmin the intensity is intensity[0]. For R<Rmin the intensity is assumed to be 0. **units**: rad dR : float Size of the cell of the radial grid, assumed linear. **units**: rad nrow : int Number of rows of the output image. **units**: pixel ncol : int Number of columns of the output image. **units**: pixel dxy : float Size of the image cell, assumed equal and uniform in both x and y direction. **units**: rad inc : float Inclination along North-South axis. **units**: rad Dx : optional, float Right Ascension offset (positive towards East, left). **units**: rad Dy : optional, float Declination offset (positive towards North, top). **units**: rad dtype_image : optional, str numpy dtype specification for the output image. origin: ['upper' | 'lower'], optional, default: 'upper' Set the [0,0] index of the array in the upper left or lower left corner of the axes. Returns ------- intensmap: 2D float array The intensity map, sweeped by 2pi. """ if origin == 'upper': v_origin = 1. elif origin == 'lower': v_origin = -1. inc_cos = np.cos(inc) nrad = len(I) gridrad = np.linspace(Rmin, Rmin + dR * (nrad - 1), nrad) # create the mesh grid x = (np.linspace(0.5, -0.5 + 1./float(ncol), ncol)) * dxy * ncol y = (np.linspace(0.5, -0.5 + 1./float(nrow), nrow)) * dxy * nrow * v_origin # we shrink the x axis, since PA is the angle East of North of the # the plane of the disk (orthogonal to the angular momentum axis) # PA=0 is a disk with vertical orbital node (aligned along North-South) xxx, yyy = np.meshgrid((x - Dx) / inc_cos, (y - Dy)) x_meshgrid = np.sqrt(xxx ** 2. + yyy ** 2.) f = interp1d(gridrad, I, kind='linear', fill_value=0., bounds_error=False, assume_sorted=True) intensmap = f(x_meshgrid) # central pixel: compute the average brightness intensmap[int(nrow / 2 + Dy / dxy * v_origin), int(ncol / 2 - Dx / dxy)] = central_pixel(I, Rmin, dR, dxy) # convert to Jansky intensmap *= dxy**2. return intensmap.astype(dtype_image) def create_reference_image(size, x0=10., y0=-3., sigma_x=50., sigma_y=30., dtype='float64', reverse_xaxis=False, correct_axes=True, sizey=None, **kwargs): """ Creates a reference image: a gaussian intensity with elliptical """ inc_cos = np.cos(0./180.*np.pi) delta_x = 1. x = (np.linspace(0., size - 1, size) - size / 2.) 
* delta_x if sizey: y = (np.linspace(0., sizey-1, sizey) - sizey/2.) * delta_x else: y = x.copy() if reverse_xaxis: xx, yy = np.meshgrid(-x, y/inc_cos) elif correct_axes: xx, yy = np.meshgrid(-x, -y/inc_cos) else: xx, yy = np.meshgrid(x, y/inc_cos) image = np.exp(-(xx-x0)**2./sigma_x - (yy-y0)**2./sigma_y) return image.astype(dtype) def create_sampling_points(nsamples, maxuv=1., dtype='float64'): # TODO make this generator smarter assert isinstance(nsamples, int) minuv = maxuv/100. # change to 10000 to have nxy=4096 np.random.seed(42) # columns are non contiguous arrays => copy uvdist = np.random.uniform(low=minuv, high=maxuv, size=nsamples) phi = np.random.uniform(low=0., high=2.*np.pi, size=nsamples) u = uvdist * np.cos(phi) v = uvdist * np.sin(phi) return u.astype(dtype), v.astype(dtype) def uv_idx(udat, vdat, du, half_size): """ For C2C transform. uv coordinates to pixel coordinates in range [0, npixels]. Assume image is square, same boundary in u and v direction. """ return half_size + udat/du, half_size + vdat/du def uv_idx_r2c(udat, vdat, du, half_size): """ For R2C transform. uv coordinates to pixel coordinates in range [0, npixels]. Assume image is square, same boundary in u and v direction. """ indu = np.abs(udat) / du indv = half_size + vdat / du uneg = udat < 0. indv[uneg] = half_size - vdat[uneg] / du return indu, indv def int_bilin_MT(f, x, y): # assume x, y are in pixel vis_int = np.zeros(len(x)) for i in range(len(x)): t = y[i] - np.floor(y[i]) u = x[i] - np.floor(x[i]) y0 = f[np.int(np.floor(y[i])), np.int(np.floor(x[i]))] y1 = f[np.int(np.floor(y[i])) + 1, np.int(np.floor(x[i]))] y2 = f[np.int(np.floor(y[i])) + 1, np.int(np.floor(x[i])) + 1] y3 = f[np.int(np.floor(y[i])), np.int(np.floor(x[i])) + 1] vis_int[i] = t * u * (y0 - y1 + y2 - y3) vis_int[i] += t * (y1 - y0) vis_int[i] += u * (y3 - y0) vis_int[i] += y0 return vis_int def matrix_size(udat, vdat, **kwargs): maxuv_factor = kwargs.get('maxuv_factor', 4.8) minuv_factor = kwargs.get('minuv_factor', 4.) uvdist = np.sqrt(udat**2 + vdat**2) maxuv = max(uvdist)*maxuv_factor minuv = min(uvdist)/minuv_factor minpix = np.uint(maxuv/minuv) Nuv = kwargs.get('force_nx', int(2**np.ceil(np.log2(minpix)))) return Nuv, minuv, maxuv def apply_phase_array(u, v, vis_int, x0, y0): """ Performs a translation in the real space by applying a phase shift in the Fourier space. This function applies the shift to data points sampling the Fourier transform of an image. Parameters ---------- u, v: 1D float array Coordinates of points in the Fourier space. units: observing wavelength vis_int: 1D float array, complex Fourier Transform sampled in the (u, v) points. Re, Im, u, v must have the same length. x0, y0: floats, rad Shifts in the real space. Returns ------- vis_int_shifted: 1D float array, complex Phase-shifted of the Fourier Transform sampled in the (u, v) points. """ x0 *= 2.*np.pi y0 *= 2.*np.pi # construct the phase change theta = u*x0 + v*y0 # apply the phase change vis_int_shifted = vis_int * (np.cos(theta) + 1j*np.sin(theta)) return vis_int_shifted def generate_random_vis(nsamples, dtype): x = 3. * np.random.uniform(low=0., high=1., size=nsamples).astype(dtype) + 2.8 +\ 1j * np.random.uniform(low=0., high=1., size=nsamples).astype(dtype) + 8.2 y = 8. 
* np.random.uniform(low=0.5, high=3., size=nsamples).astype(dtype) + 5.7 +\ 1j * np.random.uniform(low=0., high=6., size=nsamples).astype(dtype) + 21.2 w = np.random.uniform(low=0., high=1e4, size=nsamples).astype(dtype) w /= w.sum() return x, y, w def apply_rotation(PA, dRA, dDec, udat, vdat): """ Rotates the RA, Dec offsets and the udat and vdat coordinates by Position Angle PA """ # PA: rad cos_PA = np.cos(PA) sin_PA = np.sin(PA) urot = udat * cos_PA - vdat * sin_PA vrot = udat * sin_PA + vdat * cos_PA dRArot = dRA * cos_PA - dDec * sin_PA dDecrot = dRA * sin_PA + dDec * cos_PA return dRArot, dDecrot, urot, vrot def unique_part(array): """Extract the unique part of a real-to-complex Fourier transform""" return array[:, 0:int(array.shape[1]/2)+1] def assert_allclose(x, y, rtol=1e-10, atol=1e-8): """Drop in replacement for `numpy.testing.assert_allclose` that shows the nonmatching elements""" if np.isscalar(x) and np.isscalar(y) == 1: return np.testing.assert_allclose(x, y, rtol=rtol, atol=atol) if x.shape != y.shape: raise AssertionError("Shape mismatch: %s vs %s" % (str(x.shape), str(y.shape))) d = ~np.isclose(x, y, rtol, atol) if np.any(d): miss = np.where(d)[0] raise AssertionError("""Mismatch of %d elements (%g %%) at the level of rtol=%g, atol=%g %s %s %s""" % (len(miss), len(miss)/x.size, rtol, atol, repr(miss), str(x[d]), str(y[d])))
mtazzari/galario
python/utils.py
Python
lgpl-3.0
17,223
[ "Gaussian" ]
44f8f819feeab42d6e9b6d942dcbeb9104a00d4b36425f83fdc9506a4f7ba947
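apply_phase_array in the galario utilities above implements the Fourier shift theorem: translating the image plane by (x0, y0) multiplies each sampled visibility by a unit-modulus phase factor, leaving the amplitudes untouched. A short numpy sketch under the same convention (sign conventions differ between imaging codes, so this should be checked against your own) is given below; shift_visibilities and the toy (u, v) data are illustrative, not part of galario.

import numpy as np

def shift_visibilities(u, v, vis, x0, y0):
    """Apply the Fourier shift theorem to sampled visibilities.

    Follows the convention of apply_phase_array above: a translation by
    (x0, y0) radians in the image plane multiplies V(u, v) by
    exp(2j*pi*(u*x0 + v*y0)), with u, v in units of the observing wavelength.
    """
    theta = 2.0 * np.pi * (u * x0 + v * y0)
    return vis * np.exp(1j * theta)

# toy data (not from the original file)
rng = np.random.default_rng(0)
u = rng.uniform(-1e5, 1e5, 100)
v = rng.uniform(-1e5, 1e5, 100)
vis = rng.normal(size=100) + 1j * rng.normal(size=100)

shifted = shift_visibilities(u, v, vis, x0=1e-6, y0=-2e-6)

# the shift changes only the phases, never the amplitudes
assert np.allclose(np.abs(shifted), np.abs(vis))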
# wfn.py -- Wavefunctions # # Copyright (c) 2016 Steven Vancoillie # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # # Written by Steven Vancoillie # # Modified by Marcus Johansson to generate initial MOs from SALCs. # import numpy as np from collections import namedtuple from scipy import linalg as la import re from . import export from .basis import BasisSet from .orbitals import OrbitalSet from .mh5 import MolcasHDF5 from .inporb import MolcasINPORB from .errors import Error, DataNotAvailable try: import libmsym as msym except ImportError: msym = None @export class Wavefunction(): def __init__(self, mo, basis_set, salcs=None, densities=None, spindens=None, spinmult=None, n_bas=None, n_sym=None): self.mo = mo if 'alpha' in mo and 'beta' in mo: self.unrestricted = True elif 'restricted' in mo: self.unrestricted = False else: raise Exception('invalid key(s) in mo dict') self.basis_set = basis_set self.salcs = salcs self.densities = densities self.spindens = spindens self.spinmult = spinmult self.n_bas = n_bas self.n_sym = n_sym def electronic_info(self): ''' return a tuple containing the total number of electrons, the number of alpha electrons, the number of beta electrons, and the spin multiplicity. When occupation numbers are not available (i.e. NaNs), the number of electrons will be set to represent a neutral system. 
''' if self.unrestricted: n_alpha = int(np.sum(self.mo['alpha'].occupations)) n_beta = int(np.sum(self.mo['beta'].occupations)) n_electrons = n_alpha + n_beta if self.spinmult is None: spinmult = n_alpha - n_beta + 1 else: spinmult = self.spinmult else: n_electrons = np.sum(self.mo['restricted'].occupations) if self.spinmult is None: spinmult = 1 else: spinmult = self.spinmult n_beta = (n_electrons - (spinmult - 1)) // 2 n_alpha = n_electrons - n_beta electronic_charge = -n_electrons return (n_electrons, n_alpha, n_beta, spinmult, electronic_charge) def nuclear_info(self): ''' return a tuple containing the total number of atoms and nuclear charge ''' n_atoms = len(self.basis_set.center_labels) nuclear_charge = int(np.sum(self.basis_set.center_charges)) return (n_atoms, nuclear_charge) def print_orbitals(self, types=None, erange=None, pattern=None, order=None, threshold=None, weights=False): for kind in ('restricted', 'alpha', 'beta'): if kind not in self.mo: continue else: orbitals = self.mo[kind] if types is not None: orbitals = orbitals.type(*types) if erange is not None: orbitals = orbitals.erange(*erange) if pattern is not None: orbitals = orbitals.pattern(pattern) if order is not None: orbitals = orbitals.sort_basis(order=order) if weights: orbitals = orbitals.gpop() self._print_mo_header(kind=kind) if self.n_sym > 1: orbitals.show_by_irrep(threshold=threshold) else: orbitals.show(threshold=threshold) def print_symmetry_species(self, types=None, erange=None, pattern=None, order=None): for kind in ('restricted', 'alpha', 'beta'): if kind not in self.mo: continue else: orbitals = self.mo[kind] if types is not None: orbitals = orbitals.type(*types) if erange is not None: orbitals = orbitals.erange(*erange) if pattern is not None: orbitals = orbitals.pattern(pattern) if order is not None: orbitals = orbitals.sort_basis(order=order) orbitals.show_symmetry_species() def guessorb(self): """ return a set of molecular orbitals that diagonalize the atomic fock matrix. 
""" if self.basis_set.overlap is None or self.basis_set.fockint is None: raise DataNotAvailable('guessorb is missing the overlap and/or fockint matrix') guessorb = {} Smat_ao = np.asmatrix(self.basis_set.overlap) Fmat_ao = np.asmatrix(self.basis_set.fockint) Fmat_ao = Smat_ao.T * Fmat_ao * Smat_ao for kind in self.mo.keys(): C_mo = np.asmatrix(self.mo[kind].coefficients) E_mo = np.empty(len(self.mo[kind].energies)) irreps = self.mo[kind].irreps.copy() for irrep in np.unique(irreps): mo_set, = np.where(irreps == irrep) Cmat = C_mo[:,mo_set] Smat_mo = Cmat.T * Smat_ao * Cmat # orthonormalize s,U = np.linalg.eigh(Smat_mo) U_lowdin = U * np.diag(1/np.sqrt(s)) * U.T Cmat = Cmat * U_lowdin # diagonalize metric Fock Fmat_mo = Cmat.T * Fmat_ao * Cmat f,U = np.linalg.eigh(Fmat_mo) Cmat = Cmat * U # copy back to correct supsym id C_mo[:,mo_set] = Cmat E_mo[mo_set] = f # finally, create new orbital set with new coefficients and energies mo_order = np.argsort(E_mo) guessorb[kind] = OrbitalSet(C_mo[:,mo_order], energies=E_mo[mo_order], irreps=irreps[mo_order], basis_set=self.mo[kind].basis_set) return guessorb def salcorb(self, kind='restricted'): """ generate a set of initial molecular orbitals from SALCs """ if msym is None: raise ImportError('no libmsym installation found') symorb = {} Smat_ao = np.asmatrix(self.basis_set.overlap) Fmat_ao = np.asmatrix(self.basis_set.fockint) bs = self.basis_set elements = [msym.Element(coordinates = Coord, charge = int(Charge)) for Coord, Charge in zip(bs.center_coordinates, bs.center_charges)] basis_functions = [msym.RealSphericalHarmonic(element = elements[element_id-1], n = n + l, l = l, m = m) for [element_id, n, l, m] in bs.contracted_ids] E_salcs = np.empty(len(basis_functions)) supsym = np.empty(len(basis_functions), dtype=np.int_) with msym.Context(elements = elements, basis_functions = basis_functions) as ctx: point_group = ctx.find_symmetry() species_names = [s.name for s in ctx.character_table.symmetry_species] (C_salcs, salc_species, partner_functions) = ctx.salcs C_salcs = C_salcs.T salc_components = np.array([pf.dim for pf in partner_functions]) salc_sc = [(s, np.sort(np.unique(salc_components[salc_species == s]))) for s in np.unique(salc_species)] salc_sci = {} for (species, components) in salc_sc: salc_sci.update({(species,c):len(salc_sci) + i for i, c in enumerate(components)}) comp_head, *comp_tail = components select_species = species == salc_species select_comp = {c:np.where(np.all([select_species, salc_components == c], axis=0))[0] for c in components} Cmat = {c:C_salcs[:,select_comp[c]] for c in components} Smat_mo = Cmat[comp_head].T * Smat_ao * Cmat[comp_head] # average out symmetry breaking in overlap for component in comp_tail: Smat_mo += Cmat[component].T * Smat_ao * Cmat[component] Smat_mo /= len(components) # orthonormalize overlap s,U = np.linalg.eigh(Smat_mo) U_lowdin = U * np.diag(1/np.sqrt(s)) * U.T for component in components: Cmat[component] = Cmat[component] * U_lowdin # average out symmetry breaking in metric Fock Fmat_mo = Cmat[comp_head].T * Smat_ao.T * Fmat_ao * Smat_ao * Cmat[comp_head] for component in comp_tail: Fmat_mo += Cmat[component].T * Smat_ao.T * Fmat_ao * Smat_ao * Cmat[component] Fmat_mo /= len(components) # diagonalize metric Fock f,U = np.linalg.eigh(Fmat_mo) for component in components: Cmat[component] = Cmat[component] * U # update components, energies and supsym select = select_comp[component] supsym[select] = salc_sci[(species,component)] C_salcs[:,select] = Cmat[component] E_salcs[select] = f salc_order = 
np.argsort(E_salcs) for kind in self.mo.keys(): symorb[kind] = OrbitalSet(C_salcs[:,salc_order], energies=E_salcs[salc_order], irreps=supsym[salc_order], basis_set=self.mo[kind].basis_set) return Wavefunction(symorb, self.basis_set, n_sym=self.n_sym, n_bas=self.n_bas) def symmetrize(self): """ Symmetrizes the wavefunction """ if msym is None: raise ImportError('no libmsym installation found') bs = self.basis_set elements = [msym.Element(coordinates = Coord, charge = int(Charge)) for Coord, Charge in zip(bs.center_coordinates, bs.center_charges)] basis_functions = [msym.RealSphericalHarmonic(element = elements[element_id-1], n = n + l, l = l, m = m) for [element_id, n, l, m] in bs.contracted_ids] Smat_ao = np.asmatrix(self.basis_set.overlap) symorb = {} with msym.Context(elements = elements, basis_functions = basis_functions) as ctx: point_group = ctx.find_symmetry() species = ctx.character_table.symmetry_species species_names = [s.name for s in species] for kind in self.mo.keys(): # remove overlap C_mo = np.asmatrix(self.mo[kind].coefficients) s,U = np.linalg.eigh(C_mo.T*C_mo) U_lowdin = U * np.diag(1/np.sqrt(s)) * U.T C_mo = C_mo * U_lowdin (salcs, species, partner_functions) = ctx.symmetrize_wavefunctions(C_mo.T) # orthonormalize s,U = np.linalg.eigh(salcs * Smat_ao * salcs.T) U_lowdin = U * np.diag(1/np.sqrt(s)) * U.T C_mo = C_mo * U_lowdin symorb[kind] = OrbitalSet(C_mo, energies=self.mo[kind].energies, irreps=self.mo[kind].irreps, basis_set=self.mo[kind].basis_set) return Wavefunction(symorb, self.basis_set, n_sym=self.n_sym, n_bas=self.n_bas) def destroy_native_symmetry(self): """ permanently remove native symmetry from the wavefunction """ if self.n_sym > 1: if self.salcs is not None: self.n_sym = 1 self.n_bas = [sum(self.n_bas)] self.salcs = None else: raise DataNotAvailable('desymmetrization not possible without SALCs') return def symmetry_blocked_orbitals(self, kind='restricted'): """ Returns a list of orbital sets, one for each native symmetry. The orbitals will have no basis set information any longer. """ orbitals = self.mo[kind].copy() orbitals.basis_set = None if self.n_sym == 1: return [orbitals] if self.salcs is not None: salcs = self.salcs[orbitals.basis_ids,:] orbitals.coeffients = np.dot(salcs.T, orbitals.coefficients) orbital_list = [] offset = 0 for irrep, nb in enumerate(self.n_bas): mo_set, = np.where(orbitals.irreps == irrep) orbs = orbitals[mo_set].filter_basis(range(offset, offset+nb)) orbital_list.append(orbs) offset += nb return orbital_list def mulliken_charges(self): """ perform a mulliken population analysis """ if self.basis_set.overlap is not None: Smat_ao = np.asmatrix(self.basis_set.overlap) else: raise Exception('mulliken analysis is missing the overlap matrix') population = {} for kind, mo in self.mo.items(): population[kind] = np.zeros(len(self.basis_set.center_charges)) Cmat = np.asmatrix(mo.coefficients) D = Cmat * np.diag(mo.occupations) * Cmat.T DS = np.multiply(D, Smat_ao) for i, (ao, basis_id) in enumerate(zip(np.asarray(DS), mo.basis_ids)): pop = np.sum(ao) cgto_tuple = mo.basis_set.contracted_ids[basis_id] center_id, l, n, m = cgto_tuple population[kind][center_id-1] += pop if self.unrestricted: population_total = population['alpha'] + population['beta'] else: population_total = population['restricted'] mulliken_charges = self.basis_set.center_charges - population_total return mulliken_charges def natorb(self, root=1, kind='restricted'): """ return a set of natural orbitals that diagonalize the density matrix of a specific root. 
""" if self.densities is None: raise DataNotAvailable('density natrices are missing') try: density = self.densities[root-1,:,:] except IndexError: raise DataNotAvailable('density matrix missing for root {:d}'.format(root)) orbitals = self.mo[kind].copy() irreps = orbitals.irreps types = orbitals.types C_mo = orbitals.coefficients O_mo = orbitals.occupations offset = 0 for irrep in np.unique(irreps): for active in ['1', '2', '3']: mo_set, = np.where((irreps == irrep) & (types == active)) n_orb = len(mo_set) if n_orb == 0: continue C_mat = np.asmatrix(C_mo[:,mo_set]) dens = density[offset:offset+n_orb,offset:offset+n_orb] s,U = np.linalg.eigh(dens) C_mat = C_mat * U order = np.argsort(-s) C_mo[:,mo_set] = C_mat[:,order] O_mo[mo_set] = s[order] offset += n_orb return orbitals def spinnatorb(self, root=1, kind='restricted'): """ return a set of natural orbitals that diagonalize the spin density matrix of a specific root. """ if self.spindens is None: raise DataNotAvailable('spin density natrices are missing') try: density = self.spindens[root-1,:,:] except IndexError: raise DataNotAvailable('spin density matrix missing for root {:d}'.format(root)) orbitals = self.mo[kind].copy() irreps = orbitals.irreps types = orbitals.types C_mo = orbitals.coefficients O_mo = orbitals.occupations offset = 0 for irrep in np.unique(irreps): for active in ['1', '2', '3']: mo_set, = np.where((irreps == irrep) & (types == active)) n_orb = len(mo_set) if n_orb == 0: continue C_mat = np.asmatrix(C_mo[:,mo_set]) dens = density[offset:offset+n_orb,offset:offset+n_orb] s,U = np.linalg.eigh(dens) C_mat = C_mat * U order = np.argsort(-s) C_mo[:,mo_set] = C_mat[:,order] O_mo[mo_set] = s[order] offset += n_orb return orbitals def read_wfaorbs(self, filename): """ Read orbitals created by the WFA module from the HDF5 file. 
""" wfaorbs = {} f = MolcasHDF5(filename, 'r') for data in f.h5f['WFA']: if 'VECTORS' in data: orbtype = data[:data.index('_VECTORS')] n_bas_t = sum(bas for bas in self.n_bas) mo_occupations = f.mo_occupations(kind='restricted', orbtype="WFA/" + orbtype) mo_energies = f.mo_energies(kind='restricted', orbtype="WFA/" + orbtype) C_mat = f.mo_vectors(kind='restricted', orbtype="WFA/" + orbtype) assert len(C_mat)%n_bas_t == 0, "Inconsistent MO-coefficients" otout = orbtype.replace('DESYM_', '').replace('(','-').replace(')','-') wfaorbs[otout] = OrbitalSet(C_mat.reshape((n_bas_t, len(C_mat)//n_bas_t), order='F'), energies=mo_energies, occupations=mo_occupations, basis_set=self.basis_set) return wfaorbs @staticmethod def _print_mo_header(kind=None, width=128, delim='*'): if kind is not None: text = kind + ' molecular orbitals' else: text = 'molecular orbitals' starline = delim * width starskip = delim + ' ' * (width - 2) + delim titleline = delim + text.title().center(width - 2, ' ') + delim print('\n'.join([starline, starskip, titleline, starskip, starline, ''])) @classmethod def from_h5(cls, filename): """ Generates a wavefunction from a Molcas HDF5 file """ f = MolcasHDF5(filename, 'r') n_bas = f.n_bas if n_bas is None: raise Exception('no basis set size available on file') n_sym = len(n_bas) if n_sym > 1: n_atoms = f.natoms_all() center_labels = f.desym_center_labels() center_charges = f.desym_center_charges() center_coordinates = f.desym_center_coordinates() contracted_ids = f.desym_basis_function_ids() else: n_atoms = f.natoms_unique() center_labels = f.center_labels() center_charges = f.center_charges() center_coordinates = f.center_coordinates() contracted_ids = f.basis_function_ids() primitive_ids = f.primitive_ids() primitives = f.primitives() if n_sym > 1: mo_irreps = np.empty(sum(n_bas), dtype=np.int) offset = 0 for irrep, nb in enumerate(n_bas): mo_irreps[offset:offset+nb] = irrep offset += nb salcs = f.desym_matrix() else: mo_irreps = f.supsym_irrep_indices() salcs = None try: overlap = f.ao_overlap_matrix() overlap = cls.reshape_square(overlap, n_bas) if overlap is not None and n_sym > 1: overlap = np.dot(np.dot(salcs, overlap), salcs.T) except DataNotAvailable: overlap = None try: fockint = f.ao_fockint_matrix() fockint = cls.reshape_square(fockint, n_bas) if fockint is not None and n_sym > 1: fockint = np.dot(np.dot(salcs, fockint), salcs.T) except DataNotAvailable: fockint = None basis_set = BasisSet( center_labels, center_charges, center_coordinates, contracted_ids, primitive_ids, primitives, overlap, fockint, ) unrestricted = f.unrestricted() if unrestricted: kinds = ['alpha', 'beta'] else: kinds = ['restricted'] mo = {} for kind in kinds: mo_occupations = f.mo_occupations(kind=kind) mo_energies = f.mo_energies(kind=kind) mo_typeindices = f.mo_typeindices(kind=kind) mo_vectors = f.mo_vectors(kind=kind) mo_vectors = cls.reshape_square(mo_vectors, n_bas) if n_sym > 1: mo_vectors = np.dot(salcs, mo_vectors) mo[kind] = OrbitalSet(mo_vectors, types=mo_typeindices, irreps=mo_irreps, energies=mo_energies, occupations=mo_occupations, basis_set=basis_set) try: ispin = f.ispin() except DataNotAvailable: ispin = None try: densities = f.densities() except DataNotAvailable: densities = None try: spindens = f.spindens() except DataNotAvailable: spindens = None return cls(mo, basis_set, salcs=salcs, densities=densities, spindens=spindens, spinmult=ispin, n_sym=n_sym, n_bas=n_bas) @classmethod def from_inporb(cls, filename): """ Generates a wavefunction from a Molcas INPORB file """ f = 
MolcasINPORB(filename, 'r') n_bas = f.n_bas if n_bas is None: raise Exception('no basis set size available on file') n_sym = len(n_bas) mo_irreps = np.empty(sum(n_bas), dtype=np.int) offset = 0 for irrep, nb in enumerate(n_bas): mo_irreps[offset:offset+nb] = irrep offset += nb unrestricted = f.unrestricted if unrestricted: kinds = ['alpha', 'beta'] else: kinds = ['restricted'] mo = {} for kind in kinds: f.rewind() # order of reading matters! mo_vectors = f.read_orb(kind=kind) mo_occupations = f.read_occ(kind=kind) mo_energies = f.read_one(kind=kind) mo_typeindices = f.read_index() mo_vectors = cls.reshape_square(mo_vectors, n_bas) mo[kind] = OrbitalSet(mo_vectors, types=mo_typeindices, irreps=mo_irreps, energies=mo_energies, occupations=mo_occupations) return cls(mo, None, n_sym=n_sym, n_bas=n_bas) @staticmethod def reshape_square(arr, dims): """ Return a block-diagonal array where the blocks are constructed from a flat input array and an array of dimensions for each block. The array is assumed to be layed out in memory using Fortran indexing. """ if len(dims) == 1: dim = dims[0] return arr.reshape((dims[0],dims[0]), order='F') lst = [] offset = 0 for dim in dims: slice_ = arr[offset:offset+dim**2] lst.append(slice_.reshape((dim,dim), order='F')) offset += dim**2 return la.block_diag(*lst)
steabert/molpy
molpy/wfn.py
Python
gpl-2.0
24,129
[ "MOLCAS" ]
c42d62c6683d8c28372be4ce8a8d021939c939f39368e49352de94fdb8aaec92
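guessorb(), salcorb() and symmetrize() in the molpy wavefunction module above all rely on the same Lowdin (symmetric) orthonormalization step, U * diag(1/sqrt(s)) * U.T, to make a block of MO coefficients orthonormal with respect to the AO overlap matrix. A minimal sketch of that step with plain ndarrays instead of np.matrix follows; the helper name and the random test matrices are illustrative, not taken from the original code.

import numpy as np

def lowdin_orthonormalize(C, S):
    """Symmetrically (Lowdin) orthonormalize orbital coefficients C against
    the AO overlap S, returning C' such that C'.T @ S @ C' = 1.

    This is the S_mo^{-1/2} construction used repeatedly above:
        s, U = eigh(C.T S C);  C' = C @ U @ diag(1/sqrt(s)) @ U.T
    """
    S_mo = C.T @ S @ C
    s, U = np.linalg.eigh(S_mo)
    return C @ (U @ np.diag(1.0 / np.sqrt(s)) @ U.T)

# small random example (not from the original file)
rng = np.random.default_rng(1)
A = rng.normal(size=(6, 6))
S = A @ A.T + 6.0 * np.eye(6)      # a positive-definite stand-in for the AO overlap
C = rng.normal(size=(6, 4))        # four non-orthonormal orbital vectors

C_orth = lowdin_orthonormalize(C, S)
assert np.allclose(C_orth.T @ S @ C_orth, np.eye(4), atol=1e-10)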
# -*- coding: utf-8 -*- # Copyright 2013 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import copy import datetime import itertools import os import six import traceback from oslo.serialization import jsonutils from sqlalchemy import or_ from nailgun import consts from nailgun import notifier from nailgun import objects from nailgun.settings import settings from nailgun.consts import TASK_STATUSES from nailgun.db import db from nailgun.db.sqlalchemy.models import IPAddr from nailgun.db.sqlalchemy.models import Node from nailgun.db.sqlalchemy.models import Release from nailgun.logger import logger from nailgun.network import utils as net_utils from nailgun.task.helpers import TaskHelper class NailgunReceiver(object): @classmethod def remove_nodes_resp(cls, **kwargs): logger.info( "RPC method remove_nodes_resp received: %s" % jsonutils.dumps(kwargs) ) task_uuid = kwargs.get('task_uuid') nodes = kwargs.get('nodes') or [] error_nodes = kwargs.get('error_nodes') or [] inaccessible_nodes = kwargs.get('inaccessible_nodes') or [] error_msg = kwargs.get('error') status = kwargs.get('status') progress = kwargs.get('progress') if status in [consts.TASK_STATUSES.ready, consts.TASK_STATUSES.error]: progress = 100 # locking tasks on cluster task = objects.Task.get_by_uuid(task_uuid, fail_if_not_found=True) objects.TaskCollection.lock_cluster_tasks(task.cluster_id) task = objects.Task.get_by_uuid( task_uuid, fail_if_not_found=True, lock_for_update=True ) # locking cluster if task.cluster_id is not None: objects.Cluster.get_by_uid( task.cluster_id, fail_if_not_found=True, lock_for_update=True ) # locking nodes all_nodes = itertools.chain(nodes, error_nodes, inaccessible_nodes) all_nodes_ids = [ node['id'] if 'id' in node else node['uid'] for node in all_nodes ] locked_nodes = objects.NodeCollection.filter_by_list( None, 'id', all_nodes_ids, order_by='id' ) objects.NodeCollection.lock_for_update(locked_nodes).all() for node in nodes: node_db = objects.Node.get_by_uid(node['uid']) if not node_db: logger.error( u"Failed to delete node '%s': node doesn't exist", str(node) ) else: db().delete(node_db) for node in inaccessible_nodes: # Nodes which not answered by rpc just removed from db node_db = objects.Node.get_by_uid(node['uid']) if node_db: logger.warn( u'Node %s not answered by RPC, removing from db', node_db.human_readable_name) db().delete(node_db) for node in error_nodes: node_db = objects.Node.get_by_uid(node['uid']) if not node_db: logger.error( u"Failed to delete node '%s' marked as error from Astute:" " node doesn't exist", str(node) ) else: node_db.pending_deletion = False node_db.status = 'error' db().add(node_db) node['name'] = node_db.name db().flush() success_msg = u"No nodes were removed" err_msg = u"No errors occurred" if nodes: success_msg = u"Successfully removed {0} node(s)".format( len(nodes) ) notifier.notify("done", success_msg) if error_nodes: err_msg = u"Failed to remove {0} node(s): {1}".format( len(error_nodes), ', '.join( [n.get('name') or "ID: 
{0}".format(n['uid']) for n in error_nodes]) ) notifier.notify("error", err_msg) if not error_msg: error_msg = ". ".join([success_msg, err_msg]) data = { 'status': status, 'progress': progress, 'message': error_msg, } objects.Task.update(task, data) cls._update_action_log_entry(status, task.name, task_uuid, nodes) @classmethod def remove_cluster_resp(cls, **kwargs): logger.info( "RPC method remove_cluster_resp received: %s" % jsonutils.dumps(kwargs) ) task_uuid = kwargs.get('task_uuid') # in remove_nodes_resp method all objects are already locked cls.remove_nodes_resp(**kwargs) task = objects.Task.get_by_uuid(task_uuid, fail_if_not_found=True) cluster = task.cluster if task.status in ('ready',): logger.debug("Removing environment itself") cluster_name = cluster.name ips = db().query(IPAddr).filter( IPAddr.network.in_([n.id for n in cluster.network_groups]) ) map(db().delete, ips) db().flush() db().delete(cluster) db().flush() notifier.notify( "done", u"Environment '%s' and all its nodes are deleted" % ( cluster_name ) ) elif task.status in ('error',): cluster.status = 'error' db().add(cluster) db().flush() if not task.message: task.message = "Failed to delete nodes:\n{0}".format( cls._generate_error_message( task, error_types=('deletion',) ) ) notifier.notify( "error", task.message, cluster.id ) @classmethod def deploy_resp(cls, **kwargs): logger.info( "RPC method deploy_resp received: %s" % jsonutils.dumps(kwargs) ) task_uuid = kwargs.get('task_uuid') nodes = kwargs.get('nodes') or [] message = kwargs.get('error') status = kwargs.get('status') progress = kwargs.get('progress') task = objects.Task.get_by_uuid( task_uuid, fail_if_not_found=True, ) # locking all cluster tasks objects.TaskCollection.lock_cluster_tasks(task.cluster_id) # lock cluster objects.Cluster.get_by_uid( task.cluster_id, fail_if_not_found=True, lock_for_update=True ) if not status: status = task.status # lock nodes for updating so they can't be deleted q_nodes = objects.NodeCollection.filter_by_id_list( None, [n['uid'] for n in nodes], ) q_nodes = objects.NodeCollection.order_by(q_nodes, 'id') objects.NodeCollection.lock_for_update(q_nodes).all() # First of all, let's update nodes in database for node in nodes: node_db = objects.Node.get_by_uid(node['uid']) if not node_db: logger.warning( u"No node found with uid '{0}' - nothing changed".format( node['uid'] ) ) continue update_fields = ( 'error_msg', 'error_type', 'status', 'progress', 'online' ) for param in update_fields: if param in node: logger.debug( u"Updating node {0} - set {1} to {2}".format( node['uid'], param, node[param] ) ) setattr(node_db, param, node[param]) if param == 'progress' and node.get('status') == 'error' \ or node.get('online') is False: # If failure occurred with node # it's progress should be 100 node_db.progress = 100 # Setting node error_msg for offline nodes if node.get('online') is False \ and not node_db.error_msg: node_db.error_msg = u"Node is offline" # Notification on particular node failure notifier.notify( "error", u"Failed to deploy node '{0}': {1}".format( node_db.name, node_db.error_msg or "Unknown error" ), cluster_id=task.cluster_id, node_id=node['uid'], task_uuid=task_uuid ) db().flush() if nodes and not progress: progress = TaskHelper.recalculate_deployment_task_progress(task) # Let's check the whole task status if status in ('error',): cls._error_action(task, status, progress, message) elif status in ('ready',): cls._success_action(task, status, progress) else: data = {'status': status, 'progress': progress, 'message': 
message} objects.Task.update(task, data) cls._update_action_log_entry(status, task.name, task_uuid, nodes) @classmethod def provision_resp(cls, **kwargs): logger.info( "RPC method provision_resp received: %s" % jsonutils.dumps(kwargs)) task_uuid = kwargs.get('task_uuid') message = kwargs.get('error') status = kwargs.get('status') progress = kwargs.get('progress') nodes = kwargs.get('nodes', []) task = objects.Task.get_by_uuid( task_uuid, fail_if_not_found=True, lock_for_update=True ) # if task was failed on master node then we should # mark all cluster's nodes in error state master = next(( n for n in nodes if n['uid'] == consts.MASTER_ROLE), {}) # we should remove master node from the nodes since it requires # special handling and won't work with old code if master: nodes.pop(nodes.index(master)) if master.get('status') == 'error': status = 'error' progress = 100 # lock nodes for updating q_nodes = objects.NodeCollection.filter_by_id_list( None, [n['uid'] for n in nodes]) q_nodes = objects.NodeCollection.order_by(q_nodes, 'id') objects.NodeCollection.lock_for_update(q_nodes).all() for node in nodes: uid = node.get('uid') node_db = objects.Node.get_by_uid(node['uid']) if not node_db: logger.warn('Node with uid "{0}" not found'.format(uid)) continue if node.get('status') == 'error': node_db.status = 'error' node_db.progress = 100 node_db.error_type = 'provision' node_db.error_msg = node.get('error_msg', 'Unknown error') else: node_db.status = node.get('status') node_db.progress = node.get('progress') db().flush() if nodes and not progress: progress = TaskHelper.recalculate_provisioning_task_progress(task) data = {'status': status, 'progress': progress, 'message': message} objects.Task.update(task, data) cls._update_action_log_entry(status, task.name, task_uuid, nodes) @classmethod def _update_action_log_entry(cls, task_status, task_name, task_uuid, nodes_from_resp): try: if task_status in (consts.TASK_STATUSES.ready, consts.TASK_STATUSES.error): al = objects.ActionLog.get_by_kwargs(task_uuid=task_uuid, action_name=task_name) if al: data = { 'end_timestamp': datetime.datetime.utcnow(), 'additional_info': { 'nodes_from_resp': cls.sanitize_nodes_from_resp( nodes_from_resp), 'ended_with_status': task_status } } objects.ActionLog.update(al, data) except Exception as e: logger.error("_update_action_log_entry failed: %s", six.text_type(e)) @classmethod def sanitize_nodes_from_resp(cls, nodes): resp = [] if isinstance(nodes, list): for n in nodes: if isinstance(n, dict) and 'uid' in n: resp.append(n['uid']) return resp @classmethod def _generate_error_message(cls, task, error_types, names_only=False): nodes_info = [] error_nodes = db().query(Node).filter_by( cluster_id=task.cluster_id ).filter( or_( Node.status == 'error', Node.online == (False) ) ).filter( Node.error_type.in_(error_types) ).all() for n in error_nodes: if names_only: nodes_info.append(u"'{0}'".format(n.name)) else: nodes_info.append(u"'{0}': {1}".format(n.name, n.error_msg)) if nodes_info: if names_only: message = u", ".join(nodes_info) else: message = u"\n".join(nodes_info) else: message = u"Unknown error" return message @classmethod def _error_action(cls, task, status, progress, message=None): task_name = task.name.title() if message: message = u"{0} has failed. {1}".format(task_name, message) else: message = u"{0} has failed. 
Check these nodes:\n{1}".format( task_name, cls._generate_error_message( task, error_types=('deploy', 'provision'), names_only=True ) ) notifier.notify( "error", message, task.cluster_id ) data = {'status': status, 'progress': progress, 'message': message} objects.Task.update(task, data) @classmethod def _success_action(cls, task, status, progress): # check if all nodes are ready if any(map(lambda n: n.status == 'error', task.cluster.nodes)): cls._error_action(task, 'error', 100) return task_name = task.name.title() if task.cluster.mode in ('singlenode', 'multinode'): # determining horizon url - it's an IP # of a first cluster controller controller = db().query(Node).filter_by( cluster_id=task.cluster_id ).filter(Node.role_list.any(name='controller')).first() if controller: logger.debug( u"Controller is found, node_id=%s, " "getting it's IP addresses", controller.id ) public_net = filter( lambda n: n['name'] == 'public' and 'ip' in n, objects.Node.get_network_manager( controller ).get_node_networks(controller) ) if public_net: horizon_ip = public_net[0]['ip'].split('/')[0] message = ( u"{0} of environment '{1}' is done. " "Access the OpenStack dashboard (Horizon) at " "http://{2}/ or via internal network at http://{3}/" ).format( task_name, task.cluster.name, horizon_ip, controller.ip ) else: message = u"{0} of environment '{1}' is done".format( task_name, task.cluster.name ) logger.warning( u"Public ip for controller node " "not found in '{0}'".format(task.cluster.name) ) else: message = u"{0} of environment '{1}' is done".format( task_name, task.cluster.name ) logger.warning(u"Controller node not found in '{0}'".format( task.cluster.name )) elif task.cluster.is_ha_mode: # determining horizon url in HA mode - it's vip # from a public network saved in task cache try: message = ( u"{0} of environment '{1}' is done. " "Access the OpenStack dashboard (Horizon) at {2}" ).format( task_name, task.cluster.name, objects.Cluster.get_network_manager( task.cluster ).get_horizon_url(task.cluster.id) ) except Exception as exc: logger.error(": ".join([ str(exc), traceback.format_exc() ])) message = u"{0} of environment '{1}' is done".format( task_name, task.cluster.name ) logger.warning( u"Cannot find virtual IP for '{0}'".format( task.cluster.name ) ) zabbix_url = objects.Cluster.get_network_manager( task.cluster ).get_zabbix_url(task.cluster) if zabbix_url: message = "{0} Access Zabbix dashboard at {1}".format( message, zabbix_url) plugins_msg = cls._make_plugins_success_message(task.cluster.plugins) if plugins_msg: message = '{0}\n\n{1}'.format(message, plugins_msg) notifier.notify("done", message, task.cluster_id) data = {'status': status, 'progress': progress, 'message': message} objects.Task.update(task, data) @classmethod def _make_plugins_success_message(cls, plugins): """Makes plugins installation message """ msg = 'Plugin {0} is deployed. 
{1}' return '\n'.join( map(lambda p: msg.format(p.name, p.description), plugins)) @classmethod def stop_deployment_resp(cls, **kwargs): logger.info( "RPC method stop_deployment_resp received: %s" % jsonutils.dumps(kwargs) ) task_uuid = kwargs.get('task_uuid') nodes = kwargs.get('nodes', []) ia_nodes = kwargs.get('inaccessible_nodes', []) message = kwargs.get('error') status = kwargs.get('status') progress = kwargs.get('progress') task = objects.Task.get_by_uuid( task_uuid, fail_if_not_found=True, ) # locking all cluster tasks objects.TaskCollection.lock_cluster_tasks(task.cluster_id) stopping_task_names = [ consts.TASK_NAMES.deploy, consts.TASK_NAMES.deployment, consts.TASK_NAMES.provision ] # Locking other tasks for stopping q_stop_tasks = objects.TaskCollection.filter_by_list( None, 'name', stopping_task_names ) q_stop_tasks = objects.TaskCollection.filter_by( q_stop_tasks, cluster_id=task.cluster_id ) q_stop_tasks = objects.TaskCollection.order_by( q_stop_tasks, 'id' ) stop_tasks = objects.TaskCollection.lock_for_update(q_stop_tasks).all() # Locking cluster objects.Cluster.get_by_uid( task.cluster_id, fail_if_not_found=True, lock_for_update=True ) if not stop_tasks: logger.warning("stop_deployment_resp: deployment tasks \ not found for environment '%s'!", task.cluster_id) if status == "ready": task.cluster.status = "stopped" if stop_tasks: map(db().delete, stop_tasks) node_uids = [n['uid'] for n in itertools.chain(nodes, ia_nodes)] q_nodes = objects.NodeCollection.filter_by_id_list(None, node_uids) q_nodes = objects.NodeCollection.filter_by( q_nodes, cluster_id=task.cluster_id ) q_nodes = objects.NodeCollection.order_by(q_nodes, 'id') q_nodes = objects.NodeCollection.lock_for_update(q_nodes) # locking Nodes for update update_nodes = objects.NodeCollection.lock_for_update( q_nodes ).all() for node in update_nodes: objects.Node.reset_to_discover(node) if ia_nodes: cls._notify_inaccessible( task.cluster_id, [n["uid"] for n in ia_nodes], u"deployment stopping" ) message = ( u"Deployment of environment '{0}' " u"was successfully stopped".format( task.cluster.name or task.cluster_id ) ) notifier.notify( "done", message, task.cluster_id ) data = {'status': status, 'progress': progress, 'message': message} objects.Task.update(task, data) cls._update_action_log_entry(status, task.name, task_uuid, nodes) @classmethod def reset_environment_resp(cls, **kwargs): logger.info( "RPC method reset_environment_resp received: %s", jsonutils.dumps(kwargs) ) task_uuid = kwargs.get('task_uuid') nodes = kwargs.get('nodes', []) ia_nodes = kwargs.get('inaccessible_nodes', []) message = kwargs.get('error') status = kwargs.get('status') progress = kwargs.get('progress') task = objects.Task.get_by_uuid( task_uuid, fail_if_not_found=True, lock_for_update=True ) # Locking cluster objects.Cluster.get_by_uid( task.cluster_id, fail_if_not_found=True, lock_for_update=True ) if status == consts.TASK_STATUSES.ready: # restoring pending changes task.cluster.status = consts.CLUSTER_STATUSES.new objects.Cluster.add_pending_changes( task.cluster, consts.CLUSTER_CHANGES.attributes ) objects.Cluster.add_pending_changes( task.cluster, consts.CLUSTER_CHANGES.networks ) node_uids = [n["uid"] for n in itertools.chain(nodes, ia_nodes)] q_nodes = objects.NodeCollection.filter_by_id_list(None, node_uids) q_nodes = objects.NodeCollection.filter_by( q_nodes, cluster_id=task.cluster_id ) q_nodes = objects.NodeCollection.order_by(q_nodes, 'id') # locking Nodes for update update_nodes = objects.NodeCollection.lock_for_update( q_nodes 
).all() for node in update_nodes: objects.Node.reset_to_discover(node) if ia_nodes: cls._notify_inaccessible( task.cluster_id, [n["uid"] for n in ia_nodes], u"environment resetting" ) message = ( u"Environment '{0}' " u"was successfully reset".format( task.cluster.name or task.cluster_id ) ) notifier.notify( "done", message, task.cluster_id ) data = {'status': status, 'progress': progress, 'message': message} objects.Task.update(task, data) cls._update_action_log_entry(status, task.name, task_uuid, nodes) @classmethod def _notify_inaccessible(cls, cluster_id, nodes_uids, action): ia_nodes_db = db().query(Node.name).filter( Node.id.in_(nodes_uids), Node.cluster_id == cluster_id ).order_by(Node.id).yield_per(100) ia_message = ( u"Fuel couldn't reach these nodes during " u"{0}: {1}. Manual check may be needed.".format( action, u", ".join([ u"'{0}'".format(n.name) for n in ia_nodes_db ]) ) ) notifier.notify( "warning", ia_message, cluster_id ) @classmethod def verify_networks_resp(cls, **kwargs): logger.info( "RPC method verify_networks_resp received: %s" % jsonutils.dumps(kwargs) ) task_uuid = kwargs.get('task_uuid') nodes = kwargs.get('nodes') error_msg = kwargs.get('error') status = kwargs.get('status') progress = kwargs.get('progress') # We simply check that each node received all vlans for cluster task = objects.Task.get_by_uuid(task_uuid, fail_if_not_found=True) result = [] # We expect that 'nodes' contains all nodes which we test. # Situation when some nodes not answered must be processed # in orchestrator early. if nodes is None: # If no nodes in kwargs then we update progress or status only. pass elif isinstance(nodes, list): cached_nodes = task.cache['args']['nodes'] node_uids = [str(n['uid']) for n in nodes] cached_node_uids = [str(n['uid']) for n in cached_nodes] forgotten_uids = set(cached_node_uids) - set(node_uids) if forgotten_uids: absent_nodes = db().query(Node).filter( Node.id.in_(forgotten_uids) ).all() absent_node_names = [] for n in absent_nodes: if n.name: absent_node_names.append(n.name) else: absent_node_names.append('id: %s' % n.id) if not error_msg: error_msg = 'Node(s) {0} didn\'t return data.'.format( ', '.join(absent_node_names) ) status = 'error' else: error_nodes = [] for node in nodes: cached_nodes_filtered = filter( lambda n: str(n['uid']) == str(node['uid']), cached_nodes ) if not cached_nodes_filtered: logger.warning( "verify_networks_resp: arguments contain node " "data which is not in the task cache: %r", node ) continue cached_node = cached_nodes_filtered[0] for cached_network in cached_node['networks']: received_networks_filtered = filter( lambda n: n['iface'] == cached_network['iface'], node.get('networks', []) ) if received_networks_filtered: received_network = received_networks_filtered[0] absent_vlans = list( set(cached_network['vlans']) - set(received_network['vlans']) ) else: logger.warning( "verify_networks_resp: arguments don't contain" " data for interface: uid=%s iface=%s", node['uid'], cached_network['iface'] ) absent_vlans = cached_network['vlans'] if absent_vlans: data = {'uid': node['uid'], 'interface': cached_network['iface'], 'absent_vlans': absent_vlans} node_db = db().query(Node).get(node['uid']) if node_db: data['name'] = node_db.name db_nics = filter( lambda i: i.name == cached_network['iface'], node_db.interfaces ) if db_nics: data['mac'] = db_nics[0].mac else: logger.warning( "verify_networks_resp: can't find " "interface %r for node %r in DB", cached_network['iface'], node_db.id ) data['mac'] = 'unknown' else: logger.warning( 
"verify_networks_resp: can't find node " "%r in DB", node['uid'] ) error_nodes.append(data) if error_nodes: result = error_nodes status = 'error' else: error_msg = (error_msg or 'verify_networks_resp: argument "nodes"' ' have incorrect type') status = 'error' logger.error(error_msg) if status not in ('ready', 'error'): data = {'status': status, 'progress': progress, 'message': error_msg, 'result': result} objects.Task.update(task, data) else: objects.Task.update_verify_networks(task, status, progress, error_msg, result) cls._update_action_log_entry(status, task.name, task_uuid, nodes) @classmethod def multicast_verification_resp(cls, **kwargs): """Receiver for verification of multicast packages data - {1: response, 2: response} """ logger.info( u"RPC method multicast_resp received: {0}".format( jsonutils.dumps(kwargs)) ) task_uuid = kwargs.get('task_uuid') task = objects.task.Task.get_by_uuid(uuid=task_uuid) if kwargs.get('status'): task.status = kwargs['status'] task.progress = kwargs.get('progress', 0) response = kwargs.get('nodes', {}) error_msg = kwargs.get('error') if task.status == TASK_STATUSES.error: task.message = error_msg elif task.status == TASK_STATUSES.ready: errors = [] results = [] node_ids = set(config['uid'] for config in task.cache['args']['nodes']) not_received_nodes = node_ids - set(response.keys()) if not_received_nodes: msg = (u'No answer from nodes: {0}').format( list(not_received_nodes)) errors.append(msg) for node_id, received_ids in response.iteritems(): result = {} not_received_ids = node_ids - set(received_ids or []) result = {'node_id': node_id, 'not_received': list(not_received_ids)} results.append(result) if not_received_ids: msg = (u'Not received ids {0}' u' for node {1}.').format(not_received_ids, node_id) errors.append(msg) task.message = '\n'.join(errors) if errors: task.status = TASK_STATUSES.error task.result = results if task.status == TASK_STATUSES.ready: editable = copy.deepcopy(task.cluster.attributes.editable) editable['corosync']['verified']['value'] = True task.cluster.attributes.editable = editable logger.debug(u'Multicast verification message %s', task.message) objects.Task.update_verify_networks( task, task.status, task.progress, task.message, task.result) @classmethod def check_dhcp_resp(cls, **kwargs): """Receiver method for check_dhcp task For example of kwargs check FakeCheckingDhcpThread """ logger.info( "RPC method check_dhcp_resp received: %s", jsonutils.dumps(kwargs) ) messages = [] result = collections.defaultdict(list) message_template = ( u"Node {node_name} discovered DHCP server " u"via {iface} with following parameters: IP: {server_id}, " u"MAC: {mac}. This server will conflict with the installation.") task_uuid = kwargs.get('task_uuid') nodes = kwargs.get('nodes', []) error_msg = kwargs.get('error') status = kwargs.get('status') progress = kwargs.get('progress') nodes_uids = [node['uid'] for node in nodes] nodes_db = db().query(Node).filter(Node.id.in_(nodes_uids)).all() nodes_map = dict((str(node.id), node) for node in nodes_db) master_network_mac = settings.ADMIN_NETWORK['mac'] logger.debug('Mac addr on master node %s', master_network_mac) for node in nodes: if node['status'] == 'ready': for row in node.get('data', []): if not net_utils.is_same_mac(row['mac'], master_network_mac): node_db = nodes_map.get(node['uid']) if node_db: row['node_name'] = node_db.name message = message_template.format(**row) messages.append(message) result[node['uid']].append(row) else: logger.warning( 'Received message from nonexistent node. 
' 'Message %s', row) status = status if not messages else "error" error_msg = '\n'.join(messages) if messages else error_msg logger.debug('Check dhcp message %s', error_msg) task = objects.Task.get_by_uuid(task_uuid, fail_if_not_found=True) objects.Task.update_verify_networks(task, status, progress, error_msg, result) @classmethod def download_release_resp(cls, **kwargs): logger.info( "RPC method download_release_resp received: %s" % jsonutils.dumps(kwargs) ) task_uuid = kwargs.get('task_uuid') error_msg = kwargs.get('error') status = kwargs.get('status') progress = kwargs.get('progress') task = objects.Task.get_by_uuid(task_uuid, fail_if_not_found=True) release_info = task.cache['args']['release_info'] release_id = release_info['release_id'] release = db().query(Release).get(release_id) if not release: logger.error("download_release_resp: Release" " with ID %s not found", release_id) return if error_msg: status = 'error' error_msg = "{0} download and preparation " \ "has failed.".format(release.name) cls._download_release_error( release_id, error_msg ) elif progress == 100 and status == 'ready': cls._download_release_completed(release_id) result = { "release_info": { "release_id": release_id } } data = {'status': status, 'progress': progress, 'message': error_msg, 'result': result} objects.Task.update(task, data) @classmethod def dump_environment_resp(cls, **kwargs): logger.info( "RPC method dump_environment_resp received: %s" % jsonutils.dumps(kwargs) ) task_uuid = kwargs.get('task_uuid') status = kwargs.get('status') progress = kwargs.get('progress') error = kwargs.get('error') msg = kwargs.get('msg') task = objects.Task.get_by_uuid(task_uuid, fail_if_not_found=True) if status == 'error': notifier.notify('error', error) data = {'status': status, 'progress': 100, 'message': error} objects.Task.update(task, data) elif status == 'ready': dumpfile = os.path.basename(msg) notifier.notify('done', 'Snapshot is ready. ' 'Visit Support page to download') data = {'status': status, 'progress': progress, 'message': '/dump/{0}'.format(dumpfile)} objects.Task.update(task, data)
zhaochao/fuel-web
nailgun/nailgun/rpc/receiver.py
Python
apache-2.0
39,053
[ "VisIt" ]
bde255f0945b3f7d1ce566cb0f6d61f9701c6778f468f8d198e77b13179491d3
# Written by Lauri Lehtovaara, 2007 """This module implements classes for time-dependent variables and operators.""" import numpy as np from gpaw.external_potential import ExternalPotential from gpaw.utilities import pack2, unpack from gpaw.mpi import run from gpaw.fd_operators import Laplace, Gradient from gpaw.tddft.abc import * # Hamiltonian class TimeDependentHamiltonian: """Time-dependent Hamiltonian, H(t) This class contains information required to apply time-dependent Hamiltonian to a wavefunction. """ def __init__(self, wfs, atoms, hamiltonian, td_potential): """Create the TimeDependentHamiltonian-object. The time-dependent potential object must (be None or) have a member function strength(self,time), which provides the strength of the time-dependent external potential to x-direction at the given time. Parameters ---------- wfs: FDWaveFunctions time-independent grid-based wavefunctions hamiltonian: Hamiltonian time-independent Hamiltonian td_potential: TimeDependentPotential time-dependent potential """ self.wfs = wfs self.hamiltonian = hamiltonian self.td_potential = td_potential self.time = self.old_time = 0 # internal smooth potential self.vt_sG = hamiltonian.gd.zeros(hamiltonian.nspins) # Increase the accuracy of Poisson solver self.hamiltonian.poisson.eps = 1e-12 # external potential #if hamiltonian.vext_g is None: # hamiltonian.vext_g = hamiltonian.finegd.zeros() #self.ti_vext_g = hamiltonian.vext_g #self.td_vext_g = hamiltonian.finegd.zeros(n=hamiltonian.nspins) self.P = None self.spos_ac = atoms.get_scaled_positions() % 1.0 self.absorbing_boundary = None def update(self, density, time): """Updates the time-dependent Hamiltonian. Parameters ---------- density: Density the density at the given time (TimeDependentDensity.get_density()) time: float the current time """ self.old_time = self.time = time self.hamiltonian.update(density) def half_update(self, density, time): """Updates the time-dependent Hamiltonian, in such a way, that a half of the old Hamiltonian is kept and the other half is updated. Parameters ---------- density: Density the density at the given time (TimeDependentDensity.get_density()) time: float the current time """ self.old_time = self.time self.time = time # copy old self.vt_sG[:] = self.hamiltonian.vt_sG self.dH_asp = {} for a, dH_sp in self.hamiltonian.dH_asp.items(): self.dH_asp[a] = dH_sp.copy() # update self.hamiltonian.update(density) # average and difference self.hamiltonian.vt_sG[:], self.vt_sG[:] = \ 0.5*(self.hamiltonian.vt_sG + self.vt_sG), \ self.hamiltonian.vt_sG - self.vt_sG for a, dH_sp in self.hamiltonian.dH_asp.items(): dH_sp[:], self.dH_asp[a][:] = 0.5*(dH_sp + self.dH_asp[a]), \ dH_sp - self.dH_asp[a] #pack/unpack is linear for real values def half_apply_local_potential(self, psit_nG, Htpsit_nG, s): """Apply the half-difference Hamiltonian operator to a set of vectors. Parameters: psit_nG: ndarray set of vectors to which the overlap operator is applied. psit_nG: ndarray, output resulting H applied to psit_nG vectors. s: int spin index of k-point object defined in kpoint.py. """ # Does exactly the same as Hamiltonian.apply_local_potential # but uses the difference between vt_sG at time t and t+dt. vt_G = self.vt_sG[s] if psit_nG.ndim == 3: Htpsit_nG += psit_nG * vt_G else: for psit_G, Htpsit_G in zip(psit_nG, Htpsit_nG): Htpsit_G += psit_G * vt_G def half_apply(self, kpt, psit, hpsit, calculate_P_ani=True): """Applies the half-difference of the time-dependent Hamiltonian to the wavefunction psit of the k-point kpt. 
Parameters ---------- kpt: Kpoint the current k-point (kpt_u[index_of_k-point]) psit: List of coarse grid the wavefuntions (on coarse grid) (kpt_u[index_of_k-point].psit_nG[indices_of_wavefunc]) hpsit: List of coarse grid the resulting "operated wavefunctions" (H psit) calculate_P_ani: bool When True, the integrals of projector times vectors P_ni = <p_i | psit> are calculated. When False, existing P_uni are used """ hpsit.fill(0.0) self.half_apply_local_potential(psit, hpsit, kpt.s) # Does exactly the same as last part of Hamiltonian.apply but # uses the difference between dH_asp at time t and t+dt. shape = psit.shape[:-3] P_axi = self.wfs.pt.dict(shape) if calculate_P_ani: self.wfs.pt.integrate(psit, P_axi, kpt.q) else: for a, P_ni in kpt.P_ani.items(): P_axi[a][:] = P_ni for a, P_xi in P_axi.items(): dH_ii = unpack(self.dH_asp[a][kpt.s]) P_axi[a][:] = np.dot(P_xi, dH_ii) self.wfs.pt.add(hpsit, P_axi, kpt.q) if self.td_potential is not None: # FIXME: add half difference here... but maybe it's not important # as this will be used only for getting initial guess. So, should # not affect to the results, only to the speed of convergence. #raise NotImplementedError pass def apply(self, kpt, psit, hpsit, calculate_P_ani=True): """Applies the time-dependent Hamiltonian to the wavefunction psit of the k-point kpt. Parameters ---------- kpt: Kpoint the current k-point (kpt_u[index_of_k-point]) psit: List of coarse grid the wavefuntions (on coarse grid) (kpt_u[index_of_k-point].psit_nG[indices_of_wavefunc]) hpsit: List of coarse grid the resulting "operated wavefunctions" (H psit) calculate_P_ani: bool When True, the integrals of projector times vectors P_ni = <p_i | psit> are calculated. When False, existing P_uni are used """ self.hamiltonian.apply(psit, hpsit, self.wfs, kpt, calculate_P_ani) # PAW correction if self.P is not None: self.P.add(psit, hpsit, self.wfs, kpt) # Absorbing boundary conditions # Imaginary potential if self.absorbing_boundary is not None \ and self.absorbing_boundary.type == 'IPOT': hpsit[:] += self.absorbing_boundary.get_potential_matrix() * psit # Perfectly matched layers if self.absorbing_boundary is not None \ and self.absorbing_boundary.type == 'PML': # Perfectly matched layer is applied as potential Vpml = Tpml-T # Where T = -0.5*\nabla^{2}\psi (Use latex for these equations) # See abc.py for details # This is probably not the most optimal approach and slows # the propagation. if self.lpsit is None: self.lpsit = self.hamiltonian.gd.empty( n=len(psit), dtype=complex ) self.laplace.apply(psit, self.lpsit, kpt.phase_cd) hpsit[:] -= (.5 * (self.absorbing_boundary.get_G()**2 - 1.0) * self.lpsit) for i in range(3): self.gradient[i].apply(psit, self.lpsit, kpt.phase_cd) hpsit[:] -= (.5 * self.absorbing_boundary.get_G() * self.absorbing_boundary.get_dG()[i] * self.lpsit) # Time-dependent dipole field if self.td_potential is not None: #TODO on shaky ground here... strength = self.td_potential.strength ExternalPotential().add_linear_field(self.wfs, self.spos_ac, psit, hpsit, 0.5 * strength(self.time) + 0.5 * strength(self.old_time), kpt) def set_absorbing_boundary(self, absorbing_boundary): """ Sets up the absorbing boundary. Parameters: absorbing_boundary: absorbing boundary object of any kind. 
""" self.absorbing_boundary = absorbing_boundary self.absorbing_boundary.set_up(self.hamiltonian.gd) if self.absorbing_boundary.type == 'PML': gd = self.hamiltonian.gd self.laplace = Laplace(gd, n=2, dtype=complex) self.gradient = np.array((Gradient(gd,0, n=2, dtype=complex), Gradient(gd,1, n=2, dtype=complex), Gradient(gd,2, n=2, dtype=complex))) self.lpsit=None # AbsorptionKickHamiltonian class AbsorptionKickHamiltonian: """Absorption kick Hamiltonian, p.r This class contains information required to apply absorption kick Hamiltonian to a wavefunction. """ def __init__(self, wfs, atoms, strength=[0.0, 0.0, 1e-3]): """Create the AbsorptionKickHamiltonian-object. Parameters ---------- wfs: FDWaveFunctions time-independent grid-based wavefunctions atoms: Atoms list of atoms strength: float[3] strength of the delta field to different directions """ self.wfs = wfs self.spos_ac = atoms.get_scaled_positions() % 1.0 # magnitude magnitude = np.sqrt(strength[0]*strength[0] + strength[1]*strength[1] + strength[2]*strength[2]) # iterations self.iterations = int(round(magnitude / 1.0e-4)) if self.iterations < 1: self.iterations = 1 # delta p self.dp = strength / self.iterations # hamiltonian self.abs_hamiltonian = np.array([self.dp[0], self.dp[1], self.dp[2]]) def update(self, density, time): """Dummy function = does nothing. Required to have correct interface. Parameters ---------- density: Density or None the density at the given time or None (ignored) time: Float or None the current time (ignored) """ pass def half_update(self, density, time): """Dummy function = does nothing. Required to have correct interface. Parameters ---------- density: Density or None the density at the given time or None (ignored) time: float or None the current time (ignored) """ pass def apply(self, kpt, psit, hpsit, calculate_P_ani=True): """Applies the absorption kick Hamiltonian to the wavefunction psit of the k-point kpt. Parameters ---------- kpt: Kpoint the current k-point (kpt_u[index_of_k-point]) psit: List of coarse grids the wavefuntions (on coarse grid) (kpt_u[index_of_k-point].psit_nG[indices_of_wavefunc]) hpsit: List of coarse grids the resulting "operated wavefunctions" (H psit) calculate_P_ani: bool When True, the integrals of projector times vectors P_ni = <p_i | psit> are calculated. When False, existing P_uni are used """ hpsit[:] = 0.0 #TODO on shaky ground here... ExternalPotential().add_linear_field(self.wfs, self.spos_ac, psit, hpsit, self.abs_hamiltonian, kpt) # Overlap class TimeDependentOverlap: """Time-dependent overlap operator S(t) This class contains information required to apply time-dependent overlap operator to a wavefunction. """ def __init__(self, wfs): """Creates the TimeDependentOverlap-object. Parameters ---------- wfs: FDWaveFunctions time-independent grid-based wavefunctions """ self.wfs = wfs self.overlap = wfs.overlap def update_k_point_projections(self, kpt, psit=None): """Updates the projector function overlap integrals with the wavefunctions of a given k-point. Parameters ---------- kpt: Kpoint the current k-point (kpt_u[index_of_k-point]) psit: List of coarse grids (optional) the wavefuntions (on coarse grid) (kpt_u[index_of_k-point].psit_nG[indices_of_wavefunc]) """ if psit is not None: self.wfs.pt.integrate(psit, kpt.P_ani, kpt.q) else: self.wfs.pt.integrate(kpt.psit_nG, kpt.P_ani, kpt.q) def update(self): """Updates the time-dependent overlap operator. 
Parameters ---------- None """ for kpt in self.wfs.kpt_u: self.update_k_point_projections(kpt) def half_update(self): """Updates the time-dependent overlap operator, in such a way, that a half of the old overlap operator is kept and the other half is updated. !Currently does nothing! Parameters ---------- None """ #for kpt in self.wfs.kpt_u: # # copy old # P_ani = {} # for a,P_ni in kpt.P_ani.items(): # P_ani[a] = P_ni.copy() # # update # self.update_k_point_projections(kpt) # # average # for a,P_ni in P_ani.items(): # kpt.P_ani[a] += P_ni # kpt.P_ani[a] *= .5 # !!! FIX ME !!! update overlap operator/projectors/... pass def apply(self, kpt, psit, spsit, calculate_P_ani=True): """Apply the time-dependent overlap operator to the wavefunction psit of the k-point kpt. Parameters ---------- kpt: Kpoint the current k-point (kpt_u[index_of_k-point]) psit: List of coarse grids the wavefuntions (on coarse grid) (kpt_u[index_of_k-point].psit_nG[indices_of_wavefunc]) spsit: List of coarse grids the resulting "operated wavefunctions" (S psit) calculate_P_ani: bool When True, the integrals of projector times vectors P_ni = <p_i | psit> are calculated. When False, existing P_ani are used """ self.overlap.apply(psit, spsit, self.wfs, kpt, calculate_P_ani) def apply_inverse(self, kpt, psit, sinvpsit, calculate_P_ani=True): """Apply the approximative time-dependent inverse overlap operator to the wavefunction psit of the k-point kpt. Parameters ---------- kpt: Kpoint the current k-point (kpt_u[index_of_k-point]) psit: List of coarse grids the wavefuntions (on coarse grid) (kpt_u[index_of_k-point].psit_nG[indices_of_wavefunc]) sinvpsit: List of coarse grids the resulting "operated wavefunctions" (S^(-1) psit) calculate_P_ani: bool When True, the integrals of projector times vectors P_ni = <p_i | psit> are calculated. When False, existing P_uni are used """ self.overlap.apply_inverse(psit, sinvpsit, self.wfs, kpt, calculate_P_ani) # DummyDensity class DummyDensity: """Implements dummy (= does nothing) density for AbsorptionKick.""" def __init__(self, wfs): """Placeholder Density object for AbsorptionKick. Parameters ---------- wfs: FDWaveFunctions time-independent grid-based wavefunctions """ self.wfs = wfs def update(self): pass def get_wavefunctions(self): return self.wfs def get_density(self): return None # Density class TimeDependentDensity(DummyDensity): """Time-dependent density rho(t) This class contains information required to get the time-dependent density. """ def __init__(self, paw): """Creates the TimeDependentDensity-object. Parameters ---------- paw: PAW the PAW-object """ DummyDensity.__init__(self, paw.wfs) self.density = paw.density def update(self): """Updates the time-dependent density. Parameters ---------- None """ #for kpt in self.wfs.kpt_u: # self.wfs.pt.integrate(kpt.psit_nG, kpt.P_ani) self.density.update(self.wfs) def get_density(self): """Returns the current density. Parameters ---------- None """ return self.density
qsnake/gpaw
gpaw/tddft/tdopers.py
Python
gpl-3.0
17,684
[ "GPAW" ]
7df5e89b01f4d9d50f60741741be600b8813a68a44c17ada73842bdbd849ce06
import sys import os from PyQt5 import QtCore, QtWidgets import vtk from vtk.qt.QVTKRenderWindowInteractor import QVTKRenderWindowInteractor import chigger import mooseutils from ExodusPlugin import ExodusPlugin class RetinaQVTKRenderWindowInteractor(QVTKRenderWindowInteractor): """ Currently VTK7.1 and Qt5 do not work correctly on retina displays: http://public.kitware.com/pipermail/vtk-developers/2017-February/034738.html However, creating a custom resizeEvent method and wrapping the QVTKRenderWindowInteractor object in a QFrame allowed it to work for now. The idea for this wrapping came from: https://github.com/siudej/Eigenvalues/blob/master/qvtk.py """ def resizeEvent(self, event): """ Double the size on retina displays. This is not the right way to do it, but this works for framed widgets. We also need to modify all mouse events to adjust the interactor's center (e.g. for joystick mode). """ super(RetinaQVTKRenderWindowInteractor, self).resizeEvent(event) ratio = self.devicePixelRatio() w = self.width() h = self.height() if (self.parent() is not None) and (w <= self.parent().width()): self.resize(ratio*self.size()) self.GetRenderWindow().SetSize(ratio*w, ratio*h) self.GetRenderWindow().GetInteractor().SetSize(ratio*w, ratio*h) self.GetRenderWindow().GetInteractor().ConfigureEvent() self.update() class VTKWindowPlugin(QtWidgets.QFrame, ExodusPlugin): """ Plugin for volume rendering of ExodusII data with VTK via chigger. """ #: pyqtSignal: Emitted when the result is first rendered windowCreated = QtCore.pyqtSignal(chigger.exodus.ExodusReader, chigger.exodus.ExodusResult, chigger.RenderWindow) #: pyqtSignal: Emitted with the window has been updated windowUpdated = QtCore.pyqtSignal() #: pyqtSignal: Emitted when the window is reset/cleared windowReset = QtCore.pyqtSignal() #: pyqtSignal: Emitted when the camera for this window has changed cameraChanged = QtCore.pyqtSignal(vtk.vtkCamera) def __init__(self, size=None): super(VTKWindowPlugin, self).__init__() # Setup widget self.setSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding) self.setMainLayoutName('WindowLayout') # Create the QVTK interactor if self.devicePixelRatio() > 1: self.__qvtkinteractor = RetinaQVTKRenderWindowInteractor(self) else: self.__qvtkinteractor = QVTKRenderWindowInteractor(self) # Member variables self._highlight = None self._reader = None self._result = None self._filename = None self._initialized = False self._run_start_time = None self._window = chigger.RenderWindow(vtkwindow=self.__qvtkinteractor.GetRenderWindow(), vtkinteractor=self.__qvtkinteractor.GetRenderWindow().GetInteractor(), size=size) # Set to True when the window needs to be reset (e.g., when the input file was changed) self._reset_required = False # If size is provided, restrict the window if size != None: self.setFixedSize(QtCore.QSize(*size)) # Define timers for initialization and updating data self._timers = dict() for name in ['update', 'initialize']: self._timers[name] = QtCore.QTimer() self._timers[name].setInterval(1000) self._timers['update'].timeout.connect(self.onWindowRequiresUpdate) self._timers['initialize'].timeout.connect(self.onReloadWindow) # Create a "tight" layout and add the QVTK widget self._layout = QtWidgets.QHBoxLayout() self._layout.setContentsMargins(0, 0, 0, 0) self._layout.addWidget(self.__qvtkinteractor) self.setLayout(self._layout) self.setup() def reset(self): """ Clears the VTK windows and restarts the initialize timer. 
""" self._window.clear() self._window.update() self._initialized = False self._reset_required = False self._adjustTimers(start=['initialize'], stop=['update']) self.windowReset.emit() def onReloadWindow(self): """ Reloads the current file. """ if self._filename: self.onFileChanged(self._filename) def initialize(self, *args, **kwargs): """ Assumes that first file in the supplied list should be loaded, if a FilePlugin is not present. Input: filenames[list]: (optional) List of filenames to initialize the VTK window with, the first will be displayed and only if a FilePlugin is not present. """ if len(args) == 1 and isinstance(args[0], list) and not hasattr(self.parent(), 'FilePlugin'): self.onFileChanged(args[0][0]) def onFileChanged(self, filename): """ Initialize the VTK window to read and display a file. If the file is not valid a timer is started to continue to attempt to initialize the window. When a result is found and rendered, the windowCreated signal is emitted containing the chigger reader and result. Inputs: filename[str]: The filename to open. """ if (filename != self._filename) or self._reset_required: self.reset() # Do nothing if the widget is not visible or the file doesn't exist self._filename = filename file_exists = os.path.exists(self._filename) if not self.isVisible() or not file_exists: return # Determine if the file and GUI are in a valid state for rendering result if file_exists and self._run_start_time: if os.path.getmtime(self._filename) < self._run_start_time: self.reset() return # Call the base class initialization (this enables the plugin) super(VTKWindowPlugin, self).initialize() # Clear any-existing VTK objects on the window self._window.clear() # Create the reader and result chigger objects self._reader = chigger.exodus.ExodusReader(filename) self._result = chigger.exodus.ExodusResult(self._reader) # Set the interaction mode (2D/3D) self._result.update() bmin, bmax = self._result.getBounds(check=sys.platform=='darwin') if abs(bmax[-1] - bmin[-1]) < 1e-10: self._window.setOption('style', 'interactive2D') else: self._window.setOption('style', 'interactive') # Add results self._window.append(self._result) # Connect the camera to the modified event self._result.getVTKRenderer().GetActiveCamera().AddObserver(vtk.vtkCommand.ModifiedEvent, self._cameraModifiedCallback) # Update the RenderWindow self._initialized = True self._window.resetCamera() # this needs to be here to get the objects to show up correctly, I have no idea why. self._window.update() self._adjustTimers(start=['update'], stop=['initialize']) self.windowCreated.emit(self._reader, self._result, self._window) def onInputFileChanged(self, *args): """ Force window to reset on the next update b/c the input file has changed. """ self._reset_required = True def onJobStart(self, csv, path, t): """ Update the 'run' time to avoid loading old data. """ self._run_start_time = t self.onReloadWindow() def showEvent(self, *args): """ Override the widgets showEvent to load or update the window when it becomes visible """ if not self._initialized: self.onReloadWindow() else: self.onWindowRequiresUpdate() def onReaderOptionsChanged(self, options=dict()): """ Update the options for ExodusReader object. """ self.__setOptionsHelper(self._reader, options) def onResultOptionsChanged(self, options=dict()): """ Update the options for ExodusResult object. """ self.__setOptionsHelper(self._result, options) def onWindowOptionsChanged(self, options=dict()): """v Update the options for RenderWindow object. 
""" self.__setOptionsHelper(self._window, options) def onAppendResult(self, result): """ Appends a result object (e.g., ColorBar) to the ExodusWindow object """ self._window.append(result) def onRemoveResult(self, result): """ Removes a result object (e.g., ColorBar) to the ExodusWindow object """ self._window.pop(result) def resizeEvent(self, event): """ Reset the camera for the colorbar so it positioned correctly. This is QWidget method that is called when the window is resized. Args: event[QResizeEvent]: Not used """ super(VTKWindowPlugin, self).resizeEvent(event) if self._result and not self._window.needsInitialize(): try: self._result.update() except OSError: pass # no file exists0 def onCurrentChanged(self, index): """ Called when the tab is changed. Inputs: index[int]: The index of the active tab. """ active = self._index == index # If the tab is active and initialized then start auto updating if active and self._initialized: self._adjustTimers(start=['update'], stop=['initialize']) # If the tab is active and not initialized then start the initialize timer elif active and not self._initialized: self._adjustTimers(start=['initialize'], stop=['update']) # Turn off times if the tab is not active else: self._adjustTimers(stop=['initialize', 'update']) def onWindowRequiresUpdate(self, *args): """ Updates the VTK render window. """ if not self._initialized: return # Try to preform an update, if the file disappears startup the initialization timer again and remove results try: if self._window.needsUpdate(): self._window.update() self.windowUpdated.emit() except Exception: mooseutils.mooseDebug('Failed to update VTK window.', traceback=True) self.reset() def onCameraChanged(self, camera): """ Update the camera, this will be connected to the cameraChanged signal from other VTKWindowPlugin instances. """ if self._result: self._result.getVTKRenderer().GetActiveCamera().DeepCopy(camera) self._window.update() def onHighlight(self, block=None, boundary=None, nodeset=None): """ Highlight the desired block/boundary/nodeset. To remove highlight call this function without the inputs set. Args: block[list]: List of block ids to highlight. boundary[list]: List of boundary ids to highlight. nodeset[list]: List of nodeset ids to highlight. """ if not self._highlight: self._highlight = chigger.exodus.ExodusResult(self._reader, renderer=self._result.getVTKRenderer(), color=[1,0,0]) if block or boundary or nodeset: self._highlight.setOptions(block=block, boundary=boundary, nodeset=nodeset) self._highlight.setOptions(edges=True, edge_width=3, edge_color=[1,0,0]) self.onAppendResult(self._highlight) else: self._highlight.reset() self.onRemoveResult(self._highlight) self.onWindowRequiresUpdate() def onWrite(self, filename): """ Produce a *.png image of the figure. """ if filename.endswith('.py'): self.parent().write(filename) else: self._window.write(filename) def _adjustTimers(self, start=[], stop=[]): """ Helper method for starting/stoping timers. """ for s in start: self._timers[s].start() for s in stop: self._timers[s].stop() def repr(self): """ Produce a script for reproducing the VTK figure. 
""" # The content to return output = dict() # Get the reader and result options reader_options, reader_sub_options = self._reader.options().toScriptString() result_options, result_sub_options = self._result.options().toScriptString() # Remove filters (this is handled by the ExodusPluginManager) for opt in result_options: if opt.startswith('filters='): result_options.remove(opt) # Define the imports output['imports'] = ['import vtk', 'import chigger'] # Define the camera output['camera'] = ['camera = vtk.vtkCamera()'] output['camera'] += chigger.utils.print_camera(self._result.getVTKRenderer().GetActiveCamera()) result_options.append('camera=camera') output['reader'] = ['reader = chigger.exodus.ExodusReader({})'.format(repr(os.path.relpath(self._reader.filename())))] if reader_options: output['reader'] += ['reader.setOptions({})'.format(', '.join(reader_options))] for key, value in reader_sub_options.iteritems(): output['reader'] += ['reader.setOptions({}, {})'.format(repr(key), ', '.join(value))] output['result'] = ['result = chigger.exodus.ExodusResult(reader)'] if result_options: output['result'] += ['result.setOptions({})'.format(', '.join(result_options))] for key, value in result_sub_options.iteritems(): output['result'] += ['result.setOptions({}, {})'.format(repr(key), ', '.join(value))] return output def _cameraModifiedCallback(self, *args): """ Emits cameraChanged signal when the camera is modified. """ self.cameraChanged.emit(self._result.getVTKRenderer().GetActiveCamera()) def __setOptionsHelper(self, chigger_object, options): """ Private helper for setting chigger options. Inputs: chigger_object[ChiggerObject]: An object supporting setOptions calls. options[dict | chigger.utils.Options]: The options to set. """ if chigger_object: if isinstance(options, dict): chigger_object.setOptions(**options) elif isinstance(options, chigger.utils.Options): chigger_object.setOptions(options) else: raise mooseutils.MooseException('Options supplied must be a dict or utils.Options class.') def main(size=None): """ Run the VTKWindowPlugin all by its lonesome. """ from peacock.ExodusViewer.ExodusPluginManager import ExodusPluginManager widget = ExodusPluginManager(plugins=[lambda: VTKWindowPlugin(size=size)]) widget.show() return widget, widget.VTKWindowPlugin if __name__ == "__main__": from peacock.utils import Testing app = QtWidgets.QApplication(sys.argv) filename = Testing.get_chigger_input('mug_blocks_out.e') widget, window = main(size=[600,600]) window.initialize([filename]) window._result.update(variable='diffused') window.onWindowRequiresUpdate() sys.exit(app.exec_())
backmari/moose
python/peacock/ExodusViewer/plugins/VTKWindowPlugin.py
Python
lgpl-2.1
15,583
[ "VTK" ]
a7c76e59dc6e704f5cca9590dca728dc39402f12dde5e8917eadfe2b4465d636
# Copyright 2009 Brian Quinlan. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. """Implements ProcessPoolExecutor. The follow diagram and text describe the data-flow through the system: |======================= In-process =====================|== Out-of-process ==| +----------+ +----------+ +--------+ +-----------+ +---------+ | | => | Work Ids | => | | => | Call Q | => | | | | +----------+ | | +-----------+ | | | | | ... | | | | ... | | | | | | 6 | | | | 5, call() | | | | | | 7 | | | | ... | | | | Process | | ... | | Local | +-----------+ | Process | | Pool | +----------+ | Worker | | #1..n | | Executor | | Thread | | | | | +----------- + | | +-----------+ | | | | <=> | Work Items | <=> | | <= | Result Q | <= | | | | +------------+ | | +-----------+ | | | | | 6: call() | | | | ... | | | | | | future | | | | 4, result | | | | | | ... | | | | 3, except | | | +----------+ +------------+ +--------+ +-----------+ +---------+ Executor.submit() called: - creates a uniquely numbered _WorkItem and adds it to the "Work Items" dict - adds the id of the _WorkItem to the "Work Ids" queue Local worker thread: - reads work ids from the "Work Ids" queue and looks up the corresponding WorkItem from the "Work Items" dict: if the work item has been cancelled then it is simply removed from the dict, otherwise it is repackaged as a _CallItem and put in the "Call Q". New _CallItems are put in the "Call Q" until "Call Q" is full. NOTE: the size of the "Call Q" is kept small because calls placed in the "Call Q" can no longer be cancelled with Future.cancel(). - reads _ResultItems from "Result Q", updates the future stored in the "Work Items" dict and deletes the dict entry Process #1..n: - reads _CallItems from "Call Q", executes the calls, and puts the resulting _ResultItems in "Request Q" """ import atexit import multiprocessing import threading import weakref import jega from jega.futures._base import Executor, Future, InfiniteHandler from jega.six.moves import queue __author__ = 'Brian Quinlan (brian@sweetapp.com)' # Workers are created as daemon threads and processes. This is done to allow the # interpreter to exit when there are still idle processes in a # ProcessPoolExecutor's process pool (i.e. shutdown() was not called). However, # allowing workers to die with the interpreter has two undesirable properties: # - The workers would still be running during interpretor shutdown, # meaning that they would fail in unpredictable ways. # - The workers could be killed while evaluating a work item, which could # be bad if the callable being evaluated has external side-effects e.g. # writing to a file. # # To work around this problem, an exit handler is installed which tells the # workers to exit when their work queues are empty and then waits until the # threads/processes finish. _thread_references = set() _shutdown = False def _python_exit(): global _shutdown _shutdown = True for thread_reference in _thread_references: thread = thread_reference() if thread is not None: thread.join() def _remove_dead_thread_references(): """Remove inactive threads from _thread_references. Should be called periodically to prevent memory leaks in scenarios such as: >>> while True: >>> ... t = ThreadPoolExecutor(max_workers=5) >>> ... t.map(int, ['1', '2', '3', '4', '5']) """ for thread_reference in set(_thread_references): if thread_reference() is None: _thread_references.discard(thread_reference) # Controls how many more calls than processes will be queued in the call queue. 
# A smaller number will mean that processes spend more time idle waiting for # work while a larger number will make Future.cancel() succeed less frequently # (Futures in the call queue cannot be cancelled). EXTRA_QUEUED_CALLS = 1 class _WorkItem(object): def __init__(self, future, fn, args, kwargs): self.future = future self.fn = fn self.args = args self.kwargs = kwargs self.loop = evergreen.current.loop # Keep the loop alive while this work item is queued self.handler = InfiniteHandler(self.loop) self._event = threading.Event() self._cancelled = False def is_cancelled(self): self.loop.call_from_thread(self._set_running) self._event.wait() return self._cancelled def _set_running(self): if not self.future.set_running_or_notify_cancel(): self.handler.cancel() self._cancelled = True self._event.set() class _ResultItem(object): def __init__(self, work_id, exception=None, result=None): self.work_id = work_id self.exception = exception self.result = result class _CallItem(object): __slots__ = ('work_id', 'fn', 'args', 'kwargs') def __init__(self, work_id, fn, args, kwargs): self.work_id = work_id self.fn = fn self.args = args self.kwargs = kwargs def __call__(self): return self.fn(*self.args, **self.kwargs) def _process_worker(call_queue, result_queue, shutdown): """Evaluates calls from call_queue and places the results in result_queue. This worker is run in a seperate process. Args: call_queue: A multiprocessing.Queue of _CallItems that will be read and evaluated by the worker. result_queue: A multiprocessing.Queue of _ResultItems that will written to by the worker. shutdown: A multiprocessing.Event that will be set as a signal to the worker that it should exit when call_queue is empty. """ while True: try: call_item = call_queue.get(block=True, timeout=0.1) except queue.Empty: if shutdown.is_set(): return else: try: r = call_item() except BaseException as e: result_queue.put(_ResultItem(call_item.work_id, exception=e)) else: result_queue.put(_ResultItem(call_item.work_id, result=r)) def _add_call_item_to_queue(pending_work_items, work_ids, call_queue): """Fills call_queue with _WorkItems from pending_work_items. This function never blocks. Args: pending_work_items: A dict mapping work ids to _WorkItems e.g. {5: <_WorkItem...>, 6: <_WorkItem...>, ...} work_ids: A queue.Queue of work ids e.g. Queue([5, 6, ...]). Work ids are consumed and the corresponding _WorkItems from pending_work_items are transformed into _CallItems and put in call_queue. call_queue: A multiprocessing.Queue that will be filled with _CallItems derived from _WorkItems. """ while True: if call_queue.full(): return try: work_id = work_ids.get(block=False) except queue.Empty: return else: work_item = pending_work_items[work_id] if not work_item.is_cancelled(): call_queue.put(_CallItem(work_id, work_item.fn, work_item.args, work_item.kwargs), block=True) else: del work_item, pending_work_items[work_id] continue def _queue_manangement_worker(executor_reference, processes, pending_work_items, work_ids_queue, call_queue, result_queue, shutdown_process_event): """Manages the communication between this process and the worker processes. This function is run in a local thread. Args: executor_reference: A weakref.ref to the ProcessPoolExecutor that owns this thread. Used to determine if the ProcessPoolExecutor has been garbage collected and that this function can exit. process: A list of the multiprocessing.Process instances used as workers. pending_work_items: A dict mapping work ids to _WorkItems e.g. 
{5: <_WorkItem...>, 6: <_WorkItem...>, ...} work_ids_queue: A queue.Queue of work ids e.g. Queue([5, 6, ...]). call_queue: A multiprocessing.Queue that will be filled with _CallItems derived from _WorkItems for processing by the process workers. result_queue: A multiprocessing.Queue of _ResultItems generated by the process workers. shutdown_process_event: A multiprocessing.Event used to signal the process workers that they should exit when their work queue is empty. """ while True: _add_call_item_to_queue(pending_work_items, work_ids_queue, call_queue) try: result_item = result_queue.get(block=True, timeout=0.1) except queue.Empty: executor = executor_reference() # No more work items can be added if: # - The interpreter is shutting down OR # - The executor that owns this worker has been collected OR # - The executor that owns this worker has been shutdown. if _shutdown or executor is None or executor._shutdown_thread: # Since no new work items can be added, it is safe to shutdown # this thread if there are no pending work items. if not pending_work_items: shutdown_process_event.set() # If .join() is not called on the created processes then # some multiprocessing.Queue methods may deadlock on Mac OSX. for p in processes: p.join() return del executor else: work_item = pending_work_items.pop(result_item.work_id) loop = work_item.loop loop.call_from_thread(_set_work_result, work_item, result_item) del result_item, work_item, loop def _set_work_result(work_item, result_item): loop = work_item.loop if result_item.exception: work_item.future.set_exception(result_item.exception) else: work_item.future.set_result(result_item.result) work_item.handler.cancel() class ProcessPoolExecutor(Executor): def __init__(self, max_workers=None): """Initializes a new ProcessPoolExecutor instance. Args: max_workers: The maximum number of processes that can be used to execute the given calls. If None or not given then as many worker processes will be created as the machine has processors. """ _remove_dead_thread_references() if max_workers is None: self._max_workers = multiprocessing.cpu_count() else: self._max_workers = max_workers # Make the call queue slightly larger than the number of processes to # prevent the worker processes from idling. But don't make it too big # because futures in the call queue cannot be cancelled. self._call_queue = multiprocessing.Queue(self._max_workers + EXTRA_QUEUED_CALLS) self._result_queue = multiprocessing.Queue() self._work_ids = queue.Queue() self._queue_management_thread = None self._processes = set() # Shutdown is a two-step process. 
self._shutdown_thread = False self._shutdown_process_event = multiprocessing.Event() self._shutdown_lock = threading.Lock() self._queue_count = 0 self._pending_work_items = {} def _start_queue_management_thread(self): if self._queue_management_thread is None: self._queue_management_thread = threading.Thread( target=_queue_manangement_worker, args=(weakref.ref(self), self._processes, self._pending_work_items, self._work_ids, self._call_queue, self._result_queue, self._shutdown_process_event)) self._queue_management_thread.daemon = True self._queue_management_thread.start() _thread_references.add(weakref.ref(self._queue_management_thread)) def _adjust_process_count(self): for _ in range(len(self._processes), self._max_workers): p = multiprocessing.Process( target=_process_worker, args=(self._call_queue, self._result_queue, self._shutdown_process_event)) p.start() self._processes.add(p) def submit(self, fn, *args, **kwargs): with self._shutdown_lock: if self._shutdown_thread: raise RuntimeError('cannot schedule new futures after shutdown') f = Future() w = _WorkItem(f, fn, args, kwargs) self._pending_work_items[self._queue_count] = w self._work_ids.put(self._queue_count) self._queue_count += 1 self._start_queue_management_thread() self._adjust_process_count() return f submit.__doc__ = Executor.submit.__doc__ def shutdown(self, wait=True): with self._shutdown_lock: self._shutdown_thread = True if wait: if self._queue_management_thread: self._queue_management_thread.join() # To reduce the risk of openning too many files, remove references to # objects that use file descriptors. self._queue_management_thread = None self._call_queue = None self._result_queue = None self._shutdown_process_event = None self._processes = None shutdown.__doc__ = Executor.shutdown.__doc__ atexit.register(_python_exit)
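# A minimal usage sketch of this executor (kept as comments; assumptions: the
# Future returned by submit() exposes a concurrent.futures-style result(), and
# plain use from the main thread is sufficient -- jega's cooperative loop may
# impose extra requirements that are not shown here).
#
# def _square(x):
#     return x * x
#
# executor = ProcessPoolExecutor(max_workers=2)
# futures = [executor.submit(_square, i) for i in range(4)]
# print([f.result() for f in futures])   # expected: [0, 1, 4, 9]
# executor.shutdown(wait=True)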
mopemope/jega
jega/futures/_process.py
Python
bsd-3-clause
14,903
[ "Brian" ]
dfb5f791c476392b535c032000f7994d7a4e8a2f8811685f476bee2b1b685c60
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from bigdl.orca.tfpark.estimator import *
from bert import modeling


def bert_model(features, labels, mode, params):
    """
    Return an instance of BertModel and one can take its different outputs to
    perform specific tasks.
    """
    import tensorflow as tf
    input_ids = features["input_ids"]
    if "input_mask" in features:
        input_mask = features["input_mask"]
    else:
        input_mask = None
    if "token_type_ids" in features:
        token_type_ids = features["token_type_ids"]
    else:
        token_type_ids = None
    bert_config = modeling.BertConfig.from_json_file(params["bert_config_file"])
    model = modeling.BertModel(
        config=bert_config,
        is_training=(mode == tf.estimator.ModeKeys.TRAIN),
        input_ids=input_ids,
        input_mask=input_mask,
        token_type_ids=token_type_ids,
        use_one_hot_embeddings=params["use_one_hot_embeddings"])
    tvars = tf.trainable_variables()
    if params["init_checkpoint"]:
        assignment_map, initialized_variable_names = \
            modeling.get_assignment_map_from_checkpoint(tvars, params["init_checkpoint"])
        tf.train.init_from_checkpoint(params["init_checkpoint"], assignment_map)
    return model


def bert_input_fn(rdd, max_seq_length, batch_size,
                  features={"input_ids", "input_mask", "token_type_ids"},
                  extra_features=None, labels=None, label_size=None):
    """
    Takes an RDD to create the input function for BERT related TFEstimators.

    For training and evaluation, each element in rdd should be a tuple:
    (dict of features, a single label or dict of labels)
    Note that currently only integer or integer array labels are supported.

    For prediction, each element in rdd should be a dict of features.

    Features in each RDD element should contain "input_ids", "input_mask" and
    "token_type_ids", each of shape max_seq_length.
    If you have other extra features in your dict of features, you need to explicitly
    specify the argument `extra_features`, which is supposed to be the dict with
    feature name as key and tuple of (dtype, shape) as its value.
    """
    import tensorflow as tf
    assert features.issubset({"input_ids", "input_mask", "token_type_ids"})
    features_dict = {}
    for feature in features:
        features_dict[feature] = (tf.int32, [max_seq_length])
    if extra_features is not None:
        assert isinstance(extra_features, dict), "extra_features should be a dictionary"
        for k, v in extra_features.items():
            assert isinstance(k, six.string_types)
            assert isinstance(v, tuple)
            features_dict[k] = v
    if label_size is None:
        label_size = []
    else:
        label_size = [label_size]
    if labels is None:
        res_labels = (tf.int32, label_size)
    elif isinstance(labels, list) or isinstance(labels, set):
        labels = set(labels)
        if len(labels) == 1:
            res_labels = (tf.int32, label_size)
        else:
            res_labels = {}
            for label in labels:
                res_labels[label] = (tf.int32, label_size)
    else:
        raise ValueError("Wrong labels. "
                         "labels should be a set of label names if you have multiple labels")

    def input_fn(mode):
        if mode == tf.estimator.ModeKeys.TRAIN:
            return TFDataset.from_rdd(rdd,
                                      features=features_dict,
                                      labels=res_labels,
                                      batch_size=batch_size)
        elif mode == tf.estimator.ModeKeys.EVAL:
            return TFDataset.from_rdd(rdd,
                                      features=features_dict,
                                      labels=res_labels,
                                      batch_per_thread=batch_size // rdd.getNumPartitions())
        else:
            return TFDataset.from_rdd(rdd,
                                      features=features_dict,
                                      batch_per_thread=batch_size // rdd.getNumPartitions())
    return input_fn


class BERTBaseEstimator(TFEstimator):
    """
    The base class for BERT related TFEstimators.
    Common arguments: bert_config_file, init_checkpoint, use_one_hot_embeddings,
    optimizer, model_dir.

    For its subclass:
    - One can add additional arguments and access them via `params`.
    - One can utilize `_bert_model` to create model_fn and `bert_input_fn` to create input_fn.
    """
    def __init__(self, model_fn, bert_config_file, init_checkpoint=None,
                 use_one_hot_embeddings=False, model_dir=None, **kwargs):
        import tensorflow as tf
        params = {"bert_config_file": bert_config_file,
                  "init_checkpoint": init_checkpoint,
                  "use_one_hot_embeddings": use_one_hot_embeddings}
        for k, v in kwargs.items():
            params[k] = v
        estimator = tf.estimator.Estimator(model_fn, model_dir=model_dir, params=params)
        super(BERTBaseEstimator, self).__init__(estimator)
intel-analytics/BigDL
python/orca/src/bigdl/orca/tfpark/text/estimator/bert_base.py
Python
apache-2.0
5,661
[ "ORCA" ]
2e8f7dc44144ee639d5915b88bb76c3756b109cb6c1bfcec21b631c556bcbc3b
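bert_input_fn above expects each RDD element to be (features dict, label) for training and evaluation, or just a features dict for prediction, with the three BERT tensors padded to max_seq_length. A small sketch of preparing such an RDD and building the input function follows; it assumes a live SparkContext `sc` and already-tokenized examples (`tokenized_examples` is a made-up name, and the tokenization step is out of scope here).

# Illustrative sketch only; assumes a SparkContext `sc` and that token ids have
# already been produced by a BERT tokenizer. `tokenized_examples` is hypothetical.
max_seq_length = 128

def to_record(token_ids, label):
    # Each training element: (dict of features, integer label), padded to max_seq_length.
    pad = max_seq_length - len(token_ids)
    return ({"input_ids": token_ids + [0] * pad,
             "input_mask": [1] * len(token_ids) + [0] * pad,
             "token_type_ids": [0] * max_seq_length},
            label)

train_rdd = sc.parallelize(tokenized_examples).map(lambda x: to_record(x[0], x[1]))

# bert_input_fn (defined above) wires the RDD into a TFDataset for the estimator.
input_fn = bert_input_fn(train_rdd, max_seq_length=max_seq_length, batch_size=32)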
# # Copyright 2016 The BigDL Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import pickle import argparse from pyspark.sql.functions import array from bigdl.orca import init_orca_context, stop_orca_context from bigdl.orca.data.file import exists, makedirs from bigdl.friesian.feature import FeatureTable from bigdl.orca.learn.tf2.estimator import Estimator from model import * spark_conf = {"spark.network.timeout": "10000000", "spark.sql.broadcastTimeout": "7200", "spark.sql.shuffle.partitions": "2000", "spark.locality.wait": "0s", "spark.sql.hive.filesourcePartitionFileCacheSize": "4096000000", "spark.sql.crossJoin.enabled": "true", "spark.serializer": "org.apache.spark.serializer.KryoSerializer", "spark.kryo.unsafe": "true", "spark.kryoserializer.buffer.max": "1024m", "spark.task.cpus": "1", "spark.executor.heartbeatInterval": "200s", "spark.driver.maxResultSize": "40G", "spark.eventLog.enabled": "true", "spark.app.name": "recsys-2tower", "spark.executor.memoryOverhead": "120g"} def train(config, train_tbl, test_tbl, epochs=1, batch_size=128, model_dir='.'): two_tower = TwoTowerModel(config["user_col_info"], config["item_col_info"]) def model_creator(config): model = two_tower.build_model() print(model.summary()) optimizer = tf.keras.optimizers.Adam(config["lr"]) model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['binary_accuracy', 'Recall', 'AUC']) return model estimator = Estimator.from_keras(model_creator=model_creator, verbose=False, config=config) callbacks = [] from tensorflow.keras.callbacks import EarlyStopping callbacks.append(EarlyStopping(monitor='val_auc', mode='max', verbose=1, patience=5)) train_count, test_count = train_tbl.size(), test_tbl.size() train_df, test_df = train_tbl.df, test_tbl.df steps_per_epoch = math.ceil(train_count / batch_size) val_steps = math.ceil(test_count / batch_size) feature_cols = config["user_col_info"].get_name_list() + config["item_col_info"].get_name_list() print("Total number of train records: {}".format(train_count)) print("Total number of val records: {}".format(test_count)) estimator.fit(train_df, epochs=epochs, batch_size=batch_size, feature_cols=feature_cols, label_cols=['label'], callbacks=callbacks, validation_data=test_df, steps_per_epoch=steps_per_epoch, validation_steps=val_steps) model = estimator.get_model() user_model = get_1tower_model(model, two_tower.user_col_info) item_model = get_1tower_model(model, two_tower.item_col_info) tf.saved_model.save(model, os.path.join(model_dir, "twotower-model")) tf.saved_model.save(user_model, os.path.join(model_dir, "user-model")) tf.saved_model.save(item_model, os.path.join(model_dir, "item-model")) estimator.save(os.path.join(model_dir, "twotower_model.ckpt")) print("saved models") return estimator def prepare_features(train_tbl, test_tbl, reindex_tbls): def add_ratio_features(tbl): cal_ratio = (lambda x: x[1] / x[0] if x[0] > 0 else 0.0) tbl = tbl.apply(["engaged_with_user_follower_count", "engaged_with_user_following_count"], "engaged_with_user_follower_following_ratio", 
cal_ratio, "float")\ .apply(["enaging_user_follower_count", "enaging_user_following_count"], "enaging_user_follower_following_ratio", cal_ratio, "float") return tbl def organize_cols(tbl): tbl = tbl.select(array("enaging_user_follower_count", "enaging_user_following_count", "enaging_user_follower_following_ratio").alias("user_num"), array("len_hashtags", "len_domains", "len_links", "engaged_with_user_follower_count", "engaged_with_user_following_count", "engaged_with_user_follower_following_ratio").alias("item_num"), *cat_cols, *embed_cols, "label") return tbl print("reindexing embedding cols") train_tbl = train_tbl.reindex(embed_cols, reindex_tbls) test_tbl = test_tbl.reindex(embed_cols, reindex_tbls) embed_in_dims = {} for i, c, in enumerate(embed_cols): embed_in_dims[c] = max(reindex_tbls[i].df.agg({c+"_new": "max"}).collect()[0]) print("add ratio features") train_tbl = add_ratio_features(train_tbl) test_tbl = add_ratio_features(test_tbl) print("scale numerical features") train_tbl, min_max_dic = train_tbl.min_max_scale(num_cols + ratio_cols) test_tbl = test_tbl.transform_min_max_scale(num_cols + ratio_cols, min_max_dic) stats_dir = os.path.join(args.model_dir, 'stats') if not exists(stats_dir): makedirs(stats_dir) with open(os.path.join(stats_dir, "min_max.pkl"), 'wb') as f: pickle.dump(min_max_dic, f) user_col_info = ColumnInfoTower(indicator_cols=["enaging_user_is_verified"], indicator_dims=[2], embed_cols=["enaging_user_id"], embed_in_dims=[embed_in_dims["enaging_user_id"]], embed_out_dims=[16], numerical_cols=["user_num"], numerical_dims=[3], name="user") item_col_info = ColumnInfoTower(indicator_cols=["engaged_with_user_is_verified", "present_media", "tweet_type", "language"], indicator_dims=[2, 13, 3, 67], # max + 1 embed_cols=["engaged_with_user_id", "hashtags", "present_links", "present_domains"], embed_in_dims=[embed_in_dims["engaged_with_user_id"], embed_in_dims["hashtags"], embed_in_dims["present_links"], embed_in_dims["present_domains"]], embed_out_dims=[16, 16, 16, 16], numerical_cols=["item_num"], numerical_dims=[6], name="item") print("organize columns and specify user_col_info and item_col_info") train_tbl = organize_cols(train_tbl) test_tbl = organize_cols(test_tbl) return train_tbl, test_tbl, user_col_info, item_col_info if __name__ == '__main__': parser = argparse.ArgumentParser(description='Two Tower Training/Inference') parser.add_argument('--cluster_mode', type=str, default="local", help='The cluster mode, such as local, yarn, standalone or spark-submit.') parser.add_argument('--master', type=str, default=None, help='The master url, only used when cluster mode is standalone.') parser.add_argument('--executor_cores', type=int, default=8, help='The executor core number.') parser.add_argument('--executor_memory', type=str, default="160g", help='The executor memory.') parser.add_argument('--num_executor', type=int, default=8, help='The number of executor.') parser.add_argument('--driver_cores', type=int, default=4, help='The driver core number.') parser.add_argument('--driver_memory', type=str, default="36g", help='The driver memory.') parser.add_argument('--lr', default=0.001, type=float, help='learning rate') parser.add_argument('--epochs', default=1, type=int, help='train epoch') parser.add_argument('--batch_size', default=8000, type=int, help='batch size') parser.add_argument('--model_dir', default='snapshot', type=str, help='snapshot directory name (default: snapshot)') parser.add_argument('--data_dir', type=str, help='data directory') 
parser.add_argument('--frequency_limit', type=int, default=25, help='frequency limit') args = parser.parse_args() if args.cluster_mode == "local": sc = init_orca_context("local", init_ray_on_spark=True) elif args.cluster_mode == "standalone": sc = init_orca_context("standalone", master=args.master, cores=args.executor_cores, num_nodes=args.num_executor, memory=args.executor_memory, driver_cores=args.driver_cores, driver_memory=args.driver_memory, conf=spark_conf, init_ray_on_spark=True) elif args.cluster_mode == "yarn": sc = init_orca_context("yarn-client", cores=args.executor_cores, num_nodes=args.num_executor, memory=args.executor_memory, driver_cores=args.driver_cores, driver_memory=args.driver_memory, conf=spark_conf, extra_python_lib="model.py", object_store_memory="80g", init_ray_on_spark=True) elif args.cluster_mode == "spark-submit": sc = init_orca_context("spark-submit") else: raise ValueError( "cluster_mode should be one of 'local', 'yarn', 'standalone' and 'spark-submit'" ", but got " + args.cluster_mode) num_cols = ["enaging_user_follower_count", 'enaging_user_following_count', "engaged_with_user_follower_count", "engaged_with_user_following_count", "len_hashtags", "len_domains", "len_links", "hashtags", "present_links", "present_domains"] cat_cols = ["engaged_with_user_is_verified", "enaging_user_is_verified", "present_media", "tweet_type", "language"] ratio_cols = ["engaged_with_user_follower_following_ratio", "enaging_user_follower_following_ratio"] embed_cols = ["enaging_user_id", "engaged_with_user_id", "hashtags", "present_links", "present_domains"] useful_cols = num_cols + cat_cols + embed_cols train_tbl = FeatureTable.read_parquet(args.data_dir + "/train_parquet") test_tbl = FeatureTable.read_parquet(args.data_dir + "/test_parquet") full_tbl = train_tbl.concat(test_tbl, "outer") reindex_tbls = full_tbl.gen_reindex_mapping(embed_cols, freq_limit=args.frequency_limit) train_tbl, test_tbl, user_info, item_info = prepare_features(train_tbl, test_tbl, reindex_tbls) output_dir = args.data_dir + "/embed_reindex/" for i, c in enumerate(embed_cols): reindex_tbls[i].write_parquet(output_dir + "_" + c) train_config = {"lr": 1e-3, "user_col_info": user_info, "item_col_info": item_info, "inter_op_parallelism": 4, "intra_op_parallelism": args.executor_cores} train(train_config, train_tbl, test_tbl, epochs=args.epochs, batch_size=args.batch_size, model_dir=args.model_dir) stop_orca_context()
intel-analytics/BigDL
python/friesian/example/two_tower/train_2tower.py
Python
apache-2.0
12,147
[ "ORCA" ]
a0d580bdfe282ba147947069bc7e3fac42429aa019ff0aa4517fddc1fb775ba8
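The feature-preparation step in the two-tower script derives a follower/following ratio per user and then min-max scales the numeric columns. A plain-Python sketch of what those FeatureTable operations compute on a single column follows; the column names mirror the script, while the sample rows are made up for illustration.

# Plain-Python sketch of the derived ratio feature and min-max scaling used above;
# the two sample rows are invented, only the column names come from the script.
rows = [
    {"engaged_with_user_follower_count": 200, "engaged_with_user_following_count": 50},
    {"engaged_with_user_follower_count": 0, "engaged_with_user_following_count": 10},
]

def follower_following_ratio(followers, following):
    # Same rule as the cal_ratio lambda: guard against division by zero.
    return following / followers if followers > 0 else 0.0

for row in rows:
    row["engaged_with_user_follower_following_ratio"] = follower_following_ratio(
        row["engaged_with_user_follower_count"],
        row["engaged_with_user_following_count"])

# Per-column min-max scaling to [0, 1], roughly what min_max_scale /
# transform_min_max_scale apply to the numeric and ratio columns.
values = [r["engaged_with_user_follower_following_ratio"] for r in rows]
lo, hi = min(values), max(values)
print([(v - lo) / (hi - lo) if hi > lo else 0.0 for v in values])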
#
# Copyright (C) 2004-2006 Rational Discovery LLC
#
#   @@ All Rights Reserved @@
#  This file is part of the RDKit.
#  The contents are covered by the terms of the BSD license
#  which is included in the file license.txt, found at the root
#  of the RDKit source tree.
#


class ExcludedVolume(object):

  def __init__(self, featInfo, index=-1, exclusionDist=3.0):
    """ featInfo should be a sequence of ([indices], min, max) tuples """
    self.index = index
    try:
      _ = len(featInfo)
    except TypeError:
      raise ValueError('featInfo argument must be a sequence of sequences')
    if not len(featInfo):
      raise ValueError('featInfo argument must be non-empty')
    try:
      a, b, c = featInfo[0]
    except (TypeError, ValueError):
      raise ValueError('featInfo elements must be 3-sequences')

    self.featInfo = featInfo[:]
    self.exclusionDist = exclusionDist
    self.pos = None
rvianello/rdkit
rdkit/Chem/Pharm3D/ExcludedVolume.py
Python
bsd-3-clause
918
[ "RDKit" ]
f6bb503205c6b20bea58001dc712cae4fdfeba2d79c2ebca5ce60142166be7ed
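The constructor above only validates its arguments: featInfo must be a non-empty sequence of ([indices], min, max) 3-tuples, and the position is left unset. A minimal construction sketch, using the module path given for this record, could look like the following (the concrete indices and distance bounds are illustrative values, not taken from the source).

# Minimal construction sketch based on the documented featInfo contract above.
from rdkit.Chem.Pharm3D.ExcludedVolume import ExcludedVolume

# One feature made of atom indices (1, 2, 3) with (min, max) distance bounds
# of 2.0-4.0, and a 3.0 exclusion radius; the numbers are illustrative.
ev = ExcludedVolume([((1, 2, 3), 2.0, 4.0)], index=0, exclusionDist=3.0)

print(ev.featInfo, ev.exclusionDist, ev.pos)  # pos stays None until it is set later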
""" Callback when a staging operation is finished """ from DIRAC import S_OK from DIRAC.RequestManagementSystem.private.OperationHandlerBase import OperationHandlerBase from DIRAC.WorkloadManagementSystem.Client.JobStateUpdateClient import JobStateUpdateClient class StagingCallback(OperationHandlerBase): """ .. class:: StagingCallback This performs the 'Done' callback to a job waiting for the staging to finish Currently, we cannot store the JobID in the field reserved in the Request, because then our crapy finalization system will try updating the job (minor) status So we store the job ID in the Argument field of operation """ def __init__(self, operation=None, csPath=None): """c'tor :param Operation operation: an Operation instance :param str csPath: CS path for this handler """ super(StagingCallback, self).__init__(operation, csPath) def __call__(self): """update the job status""" # # decode arguments jobID = self.operation.Arguments self.log.info("Performing callback to job %s" % jobID) res = JobStateUpdateClient().updateJobFromStager(jobID, "Done") if not res["OK"]: self.log.error("Error performing the callback to the job", res) return res self.operation.Status = "Done" self.log.info("Callback from staging done") return S_OK()
DIRACGrid/DIRAC
src/DIRAC/DataManagementSystem/Agent/RequestOperations/StagingCallback.py
Python
gpl-3.0
1,446
[ "DIRAC" ]
b6a37eaba21fd616497114ed75f984bb5cb9c81bfe2c4f4192abb5a61e81342f
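The handler above simply reads the job ID out of Operation.Arguments and marks the job "Done" via the stager callback. A hedged sketch of how such an operation might be set up on the client side follows; the Request/Operation wiring is an assumption based on the usual DIRAC RequestManagementSystem pattern and is not part of this file.

# Hedged sketch only; the Request/Operation construction below is assumed from the
# common DIRAC RMS usage pattern, not taken from StagingCallback.py itself.
from DIRAC.RequestManagementSystem.Client.Request import Request
from DIRAC.RequestManagementSystem.Client.Operation import Operation

request = Request()
request.RequestName = "stageCallback_12345"

op = Operation()
op.Type = "StagingCallback"
op.Arguments = "12345"  # the job ID is stored in Arguments, as the handler expects

request.addOperation(op)
# Once this request is executed, StagingCallback.__call__ reads op.Arguments and
# calls JobStateUpdateClient().updateJobFromStager(jobID, "Done") as shown above.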
# et.po val = {" days." : " päeva.", "(all)" : "(kõik)", "(any)" : "(iga)", "(anyone)" : "(puudub)", "(available)" : "", "(blank)" : "", "(both)" : "(mõlemad)", "(everyone)" : "", "(master user, not editable)" : "", "(no change)" : "", "(no deduction)" : "", "(none)" : "(puudub)", "(unknown)" : "", "(use system)" : "", "({0} given, {1} remaining)" : "", "1 treatment" : "", "1 week" : "1 nädal", "1 year" : "", "2 weeks" : "2 nädalat", "3 months" : "kuud", "4 weeks" : "4 nädalat", "5 Year" : "", "6 months" : "kuud", "6 weeks" : "6 nädalat", "8 weeks" : "8 nädalat", "9 months" : "kuud", "A (Stray Dog)" : "", "A description or other information about the animal" : "", "A list of areas this person will homecheck - eg: S60 S61" : "", "A movement must have a reservation date or type." : "", "A person is required for this movement type." : "", "A publish job is already running." : "", "A short version of the reference number" : "", "A task is already running." : "", "A unique number to identify this movement" : "", "A unique reference for this litter" : "", "A4" : "", "ACO" : "", "AM" : "", "ASM" : "", "ASM 3 is compatible with your iPad and other tablets." : "", "ASM News" : "", "ASM can track detailed monthly and annual figures for your shelter. Install the Monthly Figures and Annual Figures reports from Settings-Reports-Browse sheltermanager.com" : "", "ASM comes with a dictionary of 4,000 animal names. Just click the generate random name button when adding an animal." : "", "ASM will remove this animal from the waiting list after a set number of weeks since the last owner contact date." : "", "Abandoned" : "Hüljatud", "Abuse" : "Väärkohtlemine", "Abyssinian" : "Abyssinian", "Access System Menu" : "", "Account" : "", "Account Types" : "", "Account code '{0}' has already been used." : "", "Account code '{0}' is not valid." : "", "Account code cannot be blank." : "", "Account disabled." : "", "Accountant" : "", "Accounts" : "", "Accounts need a code." 
: "", "Active" : "Aktiivne", "Active Incidents" : "", "Active Trap Loans" : "", "Active users: {0}" : "", "Add" : "", "Add Accounts" : "", "Add Animal" : "Lisa loomi", "Add Animals" : "Lisa loomi", "Add Appointment" : "", "Add Call" : "", "Add Citations" : "Uus vaktsineerimine", "Add Clinic Appointment" : "", "Add Cost" : "", "Add Diary" : "", "Add Diets" : "", "Add Document to Repository" : "", "Add Flag" : "", "Add Found Animal" : "Leia leitud loom", "Add Incidents" : "", "Add Investigation" : "", "Add Invoice Item" : "", "Add Licenses" : "", "Add Litter" : "", "Add Log" : "", "Add Log to Animal" : "Leia kadunud loom", "Add Lost Animal" : "Leia kadunud loom", "Add Media" : "", "Add Medical Records" : "", "Add Message" : "", "Add Movement" : "", "Add Payments" : "", "Add Person" : "", "Add Report" : "", "Add Rota" : "", "Add Stock" : "", "Add Tests" : "", "Add Transport" : "", "Add Trap Loans" : "", "Add Users" : "", "Add Vaccinations" : "Uus vaktsineerimine", "Add Vouchers" : "", "Add Waiting List" : "Vaata ootenimekirja", "Add a diary note" : "", "Add a found animal" : "Leia leitud loom", "Add a log entry" : "", "Add a lost animal" : "Lisa uus loom", "Add a medical regimen" : "", "Add a new animal" : "Lisa uus loom", "Add a new log" : "", "Add a new person" : "", "Add a person" : "", "Add a photo" : "", "Add a test" : "", "Add a vaccination" : "Uus vaktsineerimine", "Add account" : "", "Add additional field" : "", "Add an animal to the waiting list" : "", "Add citation" : "Uus vaktsineerimine", "Add cost" : "", "Add details of this email to the log after sending" : "", "Add diary" : "", "Add diary task" : "Muuda päeviku ülesandeid", "Add diet" : "", "Add extra images for use in reports and documents" : "", "Add form field" : "", "Add found animal" : "Leia leitud loom", "Add investigation" : "", "Add license" : "", "Add litter" : "", "Add log" : "", "Add lost animal" : "Lisa uus loom", "Add medical profile" : "", "Add medical regimen" : "", "Add message" : "", "Add movement" : "", "Add online form" : "", "Add payment" : "", "Add person" : "", "Add report" : "", "Add role" : "", "Add rota item" : "", "Add stock" : "", "Add template" : "", "Add test" : "", "Add this text to all animal descriptions" : "", "Add to log" : "", "Add transport" : "", "Add trap loan" : "", "Add user" : "", "Add vaccination" : "Uus vaktsineerimine", "Add voucher" : "", "Add waiting list" : "Vaata ootenimekirja", "Add {0}" : "", "Added" : "", "Added by {0} on {1}" : "", "Additional" : "Täiendav", "Additional Fields" : "", "Additional date field '{0}' contains an invalid date." : "", "Additional fields" : "", "Additional fields need a name, label and type." : "", "Address" : "Aadress", "Address Contains" : "", "Address contains" : "", "Administered" : "", "Administering Vet" : "", "Adopt" : "", "Adopt an animal" : "", "Adoptable" : "Loovutatav", "Adoptable Animal" : "", "Adoptable and published for the first time" : "", "Adopted" : "Loovutatud", "Adopted Animals" : "Lisa loomi", "Adopted Transferred In {0}" : "", "Adoption" : "Loovutus", "Adoption Coordinator" : "", "Adoption Coordinator and Fosterer" : "", "Adoption Event" : "Loovutustasu", "Adoption Fee" : "Loovutustasu", "Adoption Number" : "", "Adoption fee donations" : "", "Adoption movements must have a valid adoption date." : "", "Adoption successfully created." 
: "", "Adoptions {0}" : "", "Adult" : "Täiskasvanu", "Advanced" : "Laiendatud", "Advanced find animal screen defaults to on shelter" : "", "Affenpinscher" : "Affenpinscher", "Afghan Hound" : "Afghan Hound", "African Grey" : "African Grey", "After the user presses submit and ASM has accepted the form, redirect the user to this URL" : "", "Age" : "Vanus", "Age Group" : "Vanusegrupid", "Age Group 1" : "Vanusegrupid", "Age Group 2" : "Vanusegrupid", "Age Group 3" : "Vanusegrupid", "Age Group 4" : "Vanusegrupid", "Age Group 5" : "Vanusegrupid", "Age Group 6" : "Vanusegrupid", "Age Group 7" : "Vanusegrupid", "Age Group 8" : "Vanusegrupid", "Age Groups" : "Vanusegrupid", "Age groups are assigned based on the age of an animal. The figure in the left column is the upper limit in years for that group." : "", "Aged Between" : "", "Aged From" : "", "Aged To" : "", "Aggression" : "", "Airedale Terrier" : "Airedale Terrier", "Akbash" : "Akbash", "Akita" : "Akita", "Alaskan Malamute" : "Alaskan Malamute", "Alerts" : "", "All Animals" : "&Kõik loomad", "All On-Shelter Animals" : "Mitte-varjupaiga loom.", "All Publishers" : "", "All accounts" : "", "All animal care officers on file." : "", "All animal shelters on file." : "", "All animals matching current publishing options." : "", "All animals on the shelter." : "", "All animals where the hold ends today." : "", "All animals who are currently held in case of reclaim." : "", "All animals who are currently quarantined." : "", "All animals who are flagged as not for adoption." : "", "All animals who have been on the shelter longer than {0} months." : "", "All animals who have not been microchipped" : "", "All banned owners on file." : "", "All diary notes" : "Muuda mu päeviku märkmeid", "All donors on file." : "", "All drivers on file." : "", "All existing data in your database will be REMOVED before importing the CSV file. This removal cannot be reversed." : "", "All fields should be completed." : "", "All fosterers on file." : "", "All homechecked owners on file." : "", "All homecheckers on file." : "", "All members on file." : "", "All notes upto today" : "", "All people on file." : "", "All retailers on file." : "", "All staff on file." : "", "All time" : "", "All vets on file." : "", "All volunteers on file." : "", "Allergies" : "Allergia", "Allow a fosterer to be selected" : "", "Allow an adoption coordinator to be selected" : "", "Allow creation of payments on the Move-Reserve screen" : "", "Allow drag and drop to move animals between locations" : "", "Allow duplicate license numbers" : "", "Allow duplicate microchip numbers" : "", "Allow overriding of the movement number on the Move menu screens" : "", "Allow use of OpenOffice document templates" : "", "Alphabetically A-Z" : "", "Alphabetically Z-A" : "", "Already Signed" : "", "Already fostered to this person." 
: "", "Altered" : "", "Altered Date" : "", "Altered Dog - 1 year" : "", "Altered Dog - 3 year" : "", "Altering Vet" : "", "Always show an emblem to indicate the current location" : "", "Amazon" : "Amazon", "Amber" : "", "American" : "American", "American Bulldog" : "American Bulldog", "American Curl" : "American Curl", "American Eskimo Dog" : "American Eskimo Dog", "American Fuzzy Lop" : "American Fuzzy Lop", "American Sable" : "American Sable", "American Shorthair" : "American Shorthair", "American Staffordshire Terrier" : "American Staffordshire Terrier", "American Water Spaniel" : "American Water Spaniel", "American Wirehair" : "American Wirehair", "Amount" : "Kogus", "An age in years, eg: 1, 0.5" : "", "An animal cannot have multiple open movements." : "", "An optional comma separated list of email addresses to send the output of this report to" : "", "Anatolian Shepherd" : "Anatolian Shepherd", "Angora Rabbit" : "Angora Rabbit", "Animal" : "Loom", "Animal '{0}' created with code {1}" : "", "Animal '{0}' successfully marked deceased." : "", "Animal (optional)" : "", "Animal (via animalname field)" : "", "Animal - Additional" : "", "Animal - Death" : "", "Animal - Details" : "", "Animal - Entry" : "", "Animal - Health and Identification" : "", "Animal - Notes" : "", "Animal Codes" : "Loomade koodid", "Animal Control" : "", "Animal Control Caller" : "", "Animal Control Incident" : "", "Animal Control Officer" : "", "Animal Control Victim" : "", "Animal Emblems" : "", "Animal Flags" : "", "Animal Links" : "", "Animal Name" : "Looma nimi", "Animal Selection" : "", "Animal Shelter Manager" : "Animal Shelter Manager Login", "Animal Shelter Manager Login" : "Animal Shelter Manager Login", "Animal Sponsorship" : "Looma sponsoreerimine", "Animal Type" : "Looma liik", "Animal Types" : "Looma tüübid", "Animal board costs" : "", "Animal cannot be deceased before it was brought to the shelter" : "", "Animal code format" : "", "Animal comments MUST contain this phrase in order to match." : "Märkmed looma kohta, mida soovid näha dokumentides", "Animal control calendar" : "", "Animal control incidents matching '{0}'." : "", "Animal defecation" : "", "Animal descriptions" : "", "Animal destroyed" : "", "Animal emblems are the little icons that appear next to animal names in shelter view, the home page and search results." : "", "Animal food costs" : "", "Animal picked up" : "", "Animal shortcode format" : "", "Animals" : "Loom", "Animals at large" : "", "Animals left in vehicle" : "", "Animals matching '{0}'." : "", "Animals per page" : "", "Annual" : "Igal aastal", "Annually" : "Igal aastal", "Anonymize" : "", "Anonymize personal data after this many years" : "", "Any animal types, species, breeds, colors, locations, etc. in the CSV file that aren't already in the database will be created during the import." : "", "Any health problems the animal has" : "Looma terviseprobeemid", "Any information about the animal" : "", "Any markings or distinguishing features the animal has" : "", "Appaloosa" : "Appaloosa", "Appenzell Mountain Dog" : "Appenzell Mountain Dog", "Applehead Siamese" : "Applehead Siamese", "Appointment" : "", "Appointment date must be a valid date" : "", "Appointment {0}. {1} on {2} for {3}" : "Muuda liikumist - [{0}, {1} ({2}) to {3}]", "Appointments need a date and time." 
: "", "Approved" : "", "Apr" : "aprill", "April" : "aprill", "Arabian" : "Arabian", "Area" : "Piirkond", "Area Found" : "Leidmise piirkond", "Area Lost" : "", "Area Postcode" : "Piirkonna indeks", "Area where the animal was found" : "", "Area where the animal was lost" : "", "Areas" : "Piirkond", "Arrived" : "", "Asset" : "Vara", "Asset::Premises" : "", "At least the last name should be completed." : "", "Attach" : "", "Attach File" : "", "Attach Link" : "", "Attach a file" : "", "Attach a link to a web resource" : "", "Attach link" : "", "Audit Trail" : "", "Aug" : "august", "August" : "august", "Australian Cattle Dog/Blue Heeler" : "Australian Cattle Dog/Blue Heeler", "Australian Kelpie" : "Australian Kelpie", "Australian Shepherd" : "Australian Shepherd", "Australian Terrier" : "Australian Terrier", "Auto log users out after this many minutes of inactivity" : "", "Auto removed due to lack of owner contact." : "Eemaldatud automaatselt omaniku kontaktandmete puudumise tõttu.", "Automatically cancel any outstanding reservations on an animal when it is adopted" : "", "Automatically remove" : "", "Automatically return any outstanding foster movements on an animal when it is adopted" : "", "Automatically return any outstanding foster movements on an animal when it is transferred" : "", "Available for adoption" : "", "Available sheltermanager.com reports" : "", "B (Boarding Animal)" : "", "Baby" : "Beebi", "Balance" : "", "Balinese" : "Balinese", "Bank" : "Pank", "Bank account interest" : "", "Bank current account" : "", "Bank deposit account" : "", "Bank savings account" : "", "Bank::Current" : "", "Bank::Deposit" : "", "Bank::Savings" : "", "Banned" : "Loomavõtmiskeeld", "Base Color" : "", "Basenji" : "Basenji", "Basset Hound" : "Basset Hound", "Batch" : "märts", "Batch Number" : "", "Beagle" : "Beagle", "Bearded Collie" : "Bearded Collie", "Beauceron" : "Beauceron", "Bedlington Terrier" : "Bedlington Terrier", "Beginning of month" : "", "Belgian Hare" : "Belgian Hare", "Belgian Shepherd Dog Sheepdog" : "Belgian Shepherd Dog Sheepdog", "Belgian Shepherd Laekenois" : "Belgian Shepherd Laekenois", "Belgian Shepherd Malinois" : "Belgian Shepherd Malinois", "Belgian Shepherd Tervuren" : "Belgian Shepherd Tervuren", "Bengal" : "Bengal", "Bernese Mountain Dog" : "Bernese Mountain Dog", "Beveren" : "Beveren", "Bichon Frise" : "Bichon Frise", "Bird" : "Lind", "Birman" : "Birman", "Bite" : "Hammustus/hammustama", "Biting" : "Hammustamine", "Black" : "Must", "Black Labrador Retriever" : "Black Labrador Retriever", "Black Mouth Cur" : "Black Mouth Cur", "Black Tortie" : "", "Black and Brindle" : "Must ja hallikaspruun", "Black and Brown" : "Must ja pruun", "Black and Tan" : "Must pruuni markeeringuga", "Black and Tan Coonhound" : "Black and Tan Coonhound", "Black and White" : "Must ja Valge", "Bloodhound" : "Bloodhound", "Blue" : "Sinine", "Blue Tortie" : "", "Bluetick Coonhound" : "Bluetick Coonhound", "Board and Food" : "", "Boarding" : "", "Boarding Cost" : "", "Boarding cost type" : "", "Bobtail" : "Bobtail", "Body" : "", "Bombay" : "Bombay", "Bonded" : "", "Bonded With" : "", "Books" : "", "Border Collie" : "Border Collie", "Border Terrier" : "Border Terrier", "Bordetella" : "", "Born in Shelter" : "", "Born on Foster {0}" : "", "Born on Shelter {0}" : "", "Borzoi" : "Borzoi", "Boston Terrier" : "Boston Terrier", "Both" : "", "Bouvier des Flanders" : "Bouvier des Flanders", "Boxer" : "Boxer", "Boykin Spaniel" : "Boykin Spaniel", "Breed" : "Tõug", "Breed to use when publishing to third party 
services and adoption sites" : "", "Breeds" : "Tõud", "Briard" : "Briard", "Brindle" : "Hallikaspruun", "Brindle and Black" : "Hallikaspruun ja must", "Brindle and White" : "Hallikaspruun ja valge", "Britannia Petite" : "Britannia Petite", "British Shorthair" : "British Shorthair (Briti lühikarvaline)", "Brittany Spaniel" : "Brittany Spaniel", "Brotogeris" : "Brotogeris", "Brought In" : "Looma tooja", "Brought In By" : "Looma tooja", "Brown" : "Pruun", "Brown and Black" : "Pruun ja must", "Brown and White" : "Pruun ja valge", "Browse sheltermanager.com" : "", "Browse sheltermanager.com and install some reports, charts and mail merges into your new system." : "", "Brussels Griffon" : "Brussels Griffon", "Budgie/Budgerigar" : "Viirpapagoi", "Bulk Complete Diary" : "", "Bulk Complete Medical Records" : "", "Bulk Complete Vaccinations" : "", "Bulk Complete Waiting List" : "", "Bulk Regimen" : "", "Bulk Test" : "", "Bulk Transport" : "", "Bulk Vaccination" : "Uus vaktsineerimine", "Bulk change animals" : "", "Bull Terrier" : "Bull Terrier", "Bullmastiff" : "Bullmastiff", "Bunny Rabbit" : "Bunny Rabbit (küülik)", "Burmese" : "Birma", "Burmilla" : "Burmilla", "By" : "", "CC" : "", "CSV of animal/adopter data" : "", "CSV of animal/medical data" : "", "CSV of incident data" : "", "CSV of license data" : "", "CSV of payment data" : "", "CSV of person data" : "", "Caique" : "Caique", "Cairn Terrier" : "Cairn Terrier", "Calendar View" : "", "Calendar view" : "", "Calico" : "Calico", "Californian" : "Californian", "Call" : "", "Call Date/Time" : "", "Caller" : "", "Caller Name" : "", "Caller Phone" : "", "Camel" : "Kaamel", "Can Login" : "Logi sisse", "Can afford donation?" : "", "Can't reserve an animal that has an active movement." : "", "Canaan Dog" : "Canaan Dog", "Canadian Hairless" : "Canadian Hairless", "Canary" : "Kanaarilind", "Cancel" : "Loobu", "Cancel holds on animals this many days after the brought in date, or 0 to never cancel" : "", "Cancel unadopted reservations after" : "", "Cancel unadopted reservations after this many days, or 0 to never cancel" : "", "Cancelled" : "", "Cancelled Reservation" : "Tühistatud broneering", "Cane Corso Mastiff" : "Cane Corso Mastiff", "Carolina Dog" : "Carolina Dog", "Cash" : "", "Cat" : "Kass", "Catahoula Leopard Dog" : "Catahoula Leopard Dog", "Category" : "Kategooria", "Cats" : "Kassid", "Cattery" : "", "Cattle Dog" : "Cattle Dog", "Cavalier King Charles Spaniel" : "Cavalier King Charles Spaniel", "Cell" : "", "Cell Phone" : "", "Champagne D'Argent" : "Champagne DArgent", "Change" : "", "Change Accounts" : "", "Change Animals" : "Muuda loomi", "Change Citations" : "", "Change Clinic Apointment" : "Muuda looma liikumisi", "Change Cost" : "", "Change Date Required" : "", "Change Diets" : "", "Change Found Animal" : "", "Change Incidents" : "", "Change Investigation" : "", "Change Licenses" : "", "Change Litter" : "", "Change Log" : "", "Change Lost Animal" : "", "Change Media" : "", "Change Medical Records" : "", "Change Movement" : "", "Change Password" : "", "Change Payments" : "", "Change Person" : "", "Change Publishing Options" : "", "Change Report" : "", "Change Rota" : "", "Change Stock" : "", "Change System Options" : "", "Change Tests" : "", "Change Transactions" : "", "Change Transport" : "", "Change Trap Loans" : "", "Change User Settings" : "", "Change Vaccinations" : "", "Change Vouchers" : "", "Change Waiting List" : "", "Change date required on selected treatments" : "", "Changed Mind" : "", "Chart" : "", "Chart (Bar)" : "", "Chart 
(Line)" : "", "Chart (Pie)" : "", "Chart (Point)" : "", "Chart (Steps)" : "", "Chartreux" : "Chartreux", "Check" : "", "Check License" : "", "Check No" : "", "Checkbox" : "", "Checked By" : "", "Checkered Giant" : "Checkered Giant", "Cheque" : "", "Chesapeake Bay Retriever" : "Chesapeake Bay Retriever", "Chicken" : "Kana", "Chihuahua" : "Chihuahua", "Children" : "", "Chinchilla" : "Chinchilla", "Chinese Crested Dog" : "Chinese Crested Dog", "Chinese Foo Dog" : "Chinese Foo Dog", "Chlamydophila" : "", "Chocolate" : "", "Chocolate Labrador Retriever" : "Chocolate Labrador Retriever", "Chocolate Tortie" : "", "Chow Chow" : "Chow Chow", "Cinnamon" : "Cinnamon", "Cinnamon Tortoiseshell" : "", "Citation Type" : "", "Citation Types" : "", "Citations" : "", "City" : "", "City contains" : "", "Class" : "", "Clear" : "Puhasta", "Clear and sign again" : "", "Clear tables before importing" : "", "Clinic" : "", "Clinic Calendar" : "", "Clinic Invoice - {0}" : "", "Clinic Statuses" : "Broneeringute raamat", "Clone" : "Klooni", "Clone Animals" : "Klooni loomi", "Clone Rota" : "", "Clone the rota this week to another week" : "", "Cloning..." : "Sorteerimine...", "Close" : "Sulge", "Clumber Spaniel" : "Clumber Spaniel", "Clydesdale" : "Clydesdale", "Coat" : "Kass", "Coat Type" : "Kulu tüübid", "Coat Types" : "Kulu tüübid", "Cockapoo" : "Cockapoo", "Cockatiel" : "Cockatiel", "Cockatoo" : "Kakaduu", "Cocker Spaniel" : "Cocker Spaniel", "Code" : "Kood", "Code contains" : "", "Code format tokens:" : "", "Collie" : "Collie", "Color" : "", "Color to use when publishing to third party services and adoption sites" : "", "Colors" : "", "Columns" : "", "Columns displayed" : "", "Comma separated list of units for this location, eg: 1,2,3,4,Isolation,Pen 5" : "", "Comments" : "Kommentaarid", "Comments Contain" : "Kommentaarid sisaldavad", "Comments contain" : "Kommentaarid sisaldavad", "Comments copied to web preferred media." : "", "Complaint" : "Kaebus", "Complete" : "Täidetud", "Complete Tasks" : "", "Completed" : "Täidetud", "Completed Between" : "", "Completed Type" : "", "Completed notes upto today" : "", "Completion Date" : "", "Completion Type" : "", "Configuration" : "", "Confirm" : "", "Confirm Password" : "", "Confirmation message" : "", "Confirmed" : "", "Consulting Room" : "", "Consulting Room - {0}" : "", "Consumed" : "", "Contact" : "Kontakt", "Contact Contains" : "", "Conure" : "Conure", "Convert this reservation to an adoption" : "", "Coonhound" : "Coonhound", "Copy animal comments to the notes field of the web preferred media for this animal" : "", "Copy from animal comments" : "", "Copy of {0}" : "Koopia {0}", "Corded" : "Nöörjas (corded)", "Corgi" : "Corgi", "Cornish Rex" : "Cornish Rex", "Cost" : "Kulud", "Cost For" : "", "Cost Type" : "Kulu tüübid", "Cost Types" : "Kulu tüübid", "Cost date must be a valid date" : "", "Cost record" : "", "Costs" : "Kulud", "Costs need a date and amount." 
: "", "Coton de Tulear" : "Coton de Tulear", "Could not find animal with name '{0}'" : "", "Country" : "Maakond:", "Courtesy Listing" : "", "Cow" : "Lehm", "Cream" : "Kreemikas", "Create" : "", "Create Animal" : "", "Create Log" : "", "Create Payment" : "ravid", "Create Waiting List" : "", "Create a cost record" : "", "Create a due or received payment record from this appointment" : "Loo selle liikumise kohta dokument", "Create a new animal by copying this one" : "Loo uus loom selle kopeerimise teel", "Create a new animal from this found animal record" : "", "Create a new animal from this incident" : "", "Create a new animal from this waiting list entry" : "", "Create a new document" : "", "Create a new template" : "", "Create a new template by copying the selected template" : "", "Create a new waiting list entry from this found animal record" : "", "Create and edit" : "", "Create boarding cost record when animal is adopted" : "", "Create diary notes from a task" : "", "Create missing lookup values" : "", "Create note this many days from today, or 9999 to ask" : "", "Create this message" : "", "Create this person" : "", "Created By" : "Looja", "Creating cost and cost types creates matching accounts and transactions" : "", "Creating payments and payments types creates matching accounts and transactions" : "", "Creating..." : "", "Credit Card" : "Krediitkaart", "Creme D'Argent" : "Crème DArgent", "Criteria:" : "", "Crossbreed" : "Segavereline", "Cruelty Case" : "", "Culling" : "Väljapraakimine", "Curly" : "Lokkis", "Current" : "", "Current Vet" : "Praegune veterinaar", "Cymric" : "Cymric", "D (Dog)" : "K (Koer)", "DD = current day" : "", "DDL dump (DB2)" : "", "DDL dump (MySQL)" : "", "DDL dump (PostgreSQL)" : "", "DHLPP" : "", "DO NOT use this field to store notes about what the person is looking for." : "", "DOA {0}" : "", "DOB" : "", "Dachshund" : "Dachshund", "Daily Boarding Cost" : "", "Dalmatian" : "Dalmatian", "Dandi Dinmont Terrier" : "Dandi Dinmont Terrier", "Data" : "Kuupäev", "Data Protection" : "Viimane asukoht:", "Database" : "Andmebaas", "Date" : "Kuupäev", "Date '{0}' is not valid." : "{0} ei ole lubatud kuupäev.", "Date Brought In" : "Toomise kuupäev", "Date Found" : "Leidmise kuupäev", "Date Lost" : "", "Date Of Birth" : "Sünnikuupäev", "Date Put On" : "Date Put On", "Date Removed" : "Eemaldamise kuupäev", "Date Reported" : "Eemaldamise kuupäev", "Date and notes are mandatory." : "", "Date brought in cannot be blank" : "", "Date brought in cannot be in the future." : "", "Date brought in is not valid" : "", "Date found cannot be blank" : "", "Date found cannot be blank." : "", "Date lost cannot be blank" : "", "Date lost cannot be blank." : "", "Date of Birth" : "Sünnikuupäev", "Date of birth cannot be blank" : "", "Date of birth cannot be in the future." : "", "Date of birth is not valid" : "", "Date of last owner contact" : "", "Date put on" : "Date Put On", "Date put on cannot be blank" : "", "Date put on list" : "", "Date removed" : "Eemaldamise kuupäev", "Date reported cannot be blank" : "", "Date reported cannot be blank." 
: "", "Date/Time" : "Kuupäev/kellaaeg", "Day" : "", "Day Pivot" : "", "Days On Shelter" : "", "Dead On Arrival" : "Saabudes surnud", "Dead animal" : "", "Dead on arrival" : "Saabudes surnud", "Death" : "Surm", "Death Comments" : "", "Death Reason" : "Surma põhjused", "Death Reasons" : "Surma põhjused", "Debit Card" : "", "Dec" : "detsember", "Deceased" : "", "Deceased Date" : "", "December" : "detsember", "Declawed" : "", "Declined" : "Küüned eemaldatud:", "Default Breed" : "", "Default Brought In By" : "", "Default Coat Type" : "", "Default Color" : "", "Default Cost" : "", "Default Death Reason" : "", "Default Diary Person" : "", "Default Entry Reason" : "", "Default Incident Type" : "", "Default Location" : "", "Default Log Filter" : "", "Default Log Type" : "", "Default Payment Method" : "Tõug vaikimisi:", "Default Payment Type" : "", "Default Reservation Status" : "", "Default Return Reason" : "", "Default Rota Shift" : "", "Default Size" : "", "Default Species" : "", "Default Test Type" : "", "Default Type" : "", "Default Vaccination Type" : "", "Default Value" : "", "Default daily boarding cost" : "", "Default destination account for payments" : "", "Default image for documents" : "", "Default image for this record and the web" : "", "Default source account for costs" : "", "Default to advanced find animal screen" : "", "Default to advanced find person screen" : "", "Default transaction view" : "", "Default urgency" : "", "Default video for publishing" : "", "Default view" : "", "Defaults" : "Vaikimisi", "Defaults formats for code and shortcode are TYYYYNNN and NNT" : "", "Delete" : "Kustuta", "Delete Accounts" : "", "Delete Animals" : "Kustuta loomad", "Delete Citations" : "", "Delete Clinic Appointment" : "Kustuta looma liikumised", "Delete Cost" : "", "Delete Diary" : "", "Delete Diets" : "", "Delete Document from Repository" : "", "Delete Found Animal" : "", "Delete Incidents" : "", "Delete Incoming Forms" : "", "Delete Investigation" : "", "Delete Licenses" : "", "Delete Litter" : "", "Delete Log" : "", "Delete Lost Animal" : "", "Delete Media" : "", "Delete Medical Records" : "", "Delete Movement" : "", "Delete Payments" : "", "Delete Person" : "", "Delete Regimen" : "", "Delete Report" : "", "Delete Rota" : "", "Delete Stock" : "", "Delete Tests" : "", "Delete Transport" : "", "Delete Trap Loans" : "", "Delete Treatments" : "", "Delete Vaccinations" : "", "Delete Vouchers" : "", "Delete Waiting List" : "", "Delete all rota entries for this week" : "", "Delete this animal" : "Kustuta see loom", "Delete this incident" : "", "Delete this person" : "", "Delete this record" : "Kustuta see sissekanne", "Delete this waiting list entry" : "", "Denied" : "", "Deposit" : "", "Deposit Account" : "", "Deposit Returned" : "", "Description" : "Kirjeldus", "Description Contains" : "", "Description cannot be blank" : "", "Deselect" : "", "Details" : "Üksikasjad", "Devon Rex" : "Devon Rex", "Dialog title" : "", "Diary" : "Päevik", "Diary Task" : "", "Diary Task: {0}" : "", "Diary Tasks" : "", "Diary and Messages" : "", "Diary calendar" : "", "Diary date cannot be blank" : "", "Diary date is not valid" : "", "Diary for {0}" : "Päevik {0}", "Diary note cannot be blank" : "", "Diary note {0} marked completed" : "", "Diary note {0} rediarised for {1}" : "", "Diary notes for: {0}" : "", "Diary notes need a date and subject." : "", "Diary subject cannot be blank" : "", "Diary task items need a pivot, subject and note." : "", "Diary tasks need a name." : "", "Did not ask" : "", "Did you know?" 
: "", "Died" : "Surnud", "Died off shelter" : "", "Died {0}" : "", "Diet" : "Dieet", "Diets" : "Dieedid", "Diets need a start date." : "", "Dispatch" : "", "Dispatch Address" : "", "Dispatch Between" : "", "Dispatch Date/Time" : "", "Dispatch {0}: {1}" : "", "Dispatched ACO" : "", "Display" : "", "Display Index" : "", "Display a search button at the right side of the search box" : "", "Distemper" : "", "Do Not Publish" : "Avalda", "Do Not Register Microchip" : "", "Do not show" : "", "Doberman Pinscher" : "Doberman Pinscher", "Document" : "", "Document Link" : "", "Document Repository" : "", "Document Templates" : "", "Document file" : "", "Document signed" : "", "Document signing request" : "", "Document templates" : "", "Documents" : "", "Dog" : "Koer", "Dogo Argentino" : "Dogo Argentino", "Dogs" : "Koerad", "Dogue de Bordeaux" : "Dogue de Bordeaux", "Domestic Long Hair" : "Kodukass pikakarvaline", "Domestic Medium Hair" : "Kodukass keskmise pikkusega karvaga", "Domestic Short Hair" : "Kodukass lühikarvaline", "Don't create a cost record" : "", "Don't scale" : "", "Donated" : "", "Donation" : "Annetus", "Donation?" : "Annetus?", "Donations for animals entering the shelter" : "", "Done" : "Tuvi", "Donkey" : "Eesel", "Donkey/Mule" : "Donkey/Mule", "Donor" : "Sponsor", "Dosage" : "Doos", "Dove" : "Tuvi", "Download" : "", "Draft" : "Draft (Mustand)", "Driver" : "", "Drop files here..." : "", "Dropoff" : "", "Duck" : "Part", "Due" : "", "Due in next month" : "", "Due in next week" : "", "Due in next year" : "", "Due today" : "", "Duration" : "Kestus", "Dutch" : "Dutch", "Dutch Shepherd" : "Dutch Shepherd", "Dwarf" : "Dwarf", "Dwarf Eared" : "Dwarf Eared", "E = first letter of animal entry category" : "", "EE = first and second letter of animal entry category" : "", "Eclectus" : "Eclectus", "Edit" : "Muuda", "Edit All Diary Notes" : "Muuda kõiki päeviku sissekandeid", "Edit Appointment" : "Muuda liikumist", "Edit Diary Tasks" : "Muuda päeviku ülesandeid", "Edit HTML publishing templates" : "", "Edit Header/Footer" : "", "Edit Invoice Item" : "Muuda dieeti", "Edit Lookups" : "", "Edit My Diary Notes" : "Muuda mu päeviku märkmeid", "Edit Online Forms" : "", "Edit Reports" : "", "Edit Roles" : "", "Edit Users" : "", "Edit account" : "", "Edit additional field" : "", "Edit citation" : "Uus vaktsineerimine", "Edit cost" : "", "Edit diary" : "", "Edit diary notes" : "Muuda mu päeviku märkmeid", "Edit diary task" : "Muuda päeviku ülesandeid", "Edit diary tasks" : "Muuda päeviku ülesandeid", "Edit diet" : "", "Edit document" : "", "Edit form field" : "", "Edit investigation" : "", "Edit invoice" : "Muuda seda vaitšerit", "Edit license" : "", "Edit litter" : "", "Edit litters" : "", "Edit log" : "", "Edit media notes" : "Muuda mu päeviku märkmeid", "Edit medical profile" : "", "Edit medical regimen" : "", "Edit movement" : "", "Edit my diary notes" : "Muuda mu päeviku märkmeid", "Edit my diary notes" : "Muuda mu päeviku märkmeid", "Edit notes" : "", "Edit online form" : "", "Edit online form HTML header/footer" : "", "Edit payment" : "", "Edit report" : "", "Edit report template HTML header/footer" : "", "Edit role" : "", "Edit roles" : "", "Edit rota item" : "", "Edit stock" : "", "Edit system users" : "", "Edit template" : "", "Edit test" : "", "Edit the current waiting list" : "", "Edit transaction" : "", "Edit transport" : "", "Edit trap loan" : "", "Edit user" : "", "Edit vaccination" : "Uus vaktsineerimine", "Edit voucher" : "", "Edit {0}" : "Muuda {0}", "Egyptian Mau" : "Egyptian Mau", 
"Electricity Bills" : "", "Email" : "E-post", "Email Address" : "", "Email PDF" : "", "Email Person" : "", "Email To" : "", "Email a copy of the selected HTML documents as PDFs" : "", "Email a copy of the selected media files" : "", "Email address" : "", "Email document for electronic signature" : "", "Email incident notes to ACO" : "", "Email incoming form submissions to this comma separated list of email addresses" : "", "Email media" : "", "Email person" : "", "Email signature" : "", "Email submissions to" : "", "Email this message to all matching users" : "", "Email this person" : "", "Email users their diary notes each day" : "", "Emu" : "Emu", "Enable FTP uploading" : "", "Enable accounts functionality" : "", "Enable location filters" : "", "Enable lost and found functionality" : "", "Enable multiple sites" : "", "Enable the waiting list functionality" : "", "Enable visual effects" : "", "Enabled" : "", "End Of Day" : "", "End Time" : "", "End at" : "", "End of month" : "", "End of year" : "", "Ends" : "", "Ends after" : "Lõppeb peale", "English Bulldog" : "English Bulldog", "English Cocker Spaniel" : "English Cocker Spaniel", "English Coonhound" : "English Coonhound", "English Lop" : "English Lop", "English Pointer" : "English Pointer", "English Setter" : "English Setter", "English Shepherd" : "English Shepherd", "English Spot" : "English Spot", "English Springer Spaniel" : "English Springer Spaniel", "English Toy Spaniel" : "English Toy Spaniel", "Entered (newest first)" : "", "Entered (oldest first)" : "", "Entered From" : "", "Entered To" : "", "Entered shelter" : "", "Entering 'activelost' or 'activefound' in the search box will show you lost and found animals reported in the last 30 days." : "", "Entering 'deceased' in the search box will show you recently deceased animals." : "", "Entering 'fosterers', 'homecheckers', 'staff', 'volunteers', 'aco' or 'members' in the search box will show you those groups of people." : "", "Entering 'notforadoption' in the search box will show you all shelter animals with the not for adoption flag set." : "", "Entering 'os' in the search box will show you all shelter animals." : "", "Entlebucher" : "Entlebucher", "Entry" : "", "Entry Category" : "", "Entry Donation" : "", "Entry Reason" : "Sissetuleku põhjused", "Entry Reason Category" : "", "Entry Reasons" : "Sissetuleku põhjused", "Entry reason" : "Sissetuleku põhjused", "Error contacting server." : "", "Escaped" : "Lahti pääsenud", "Escaped {0}" : "", "Eskimo Dog" : "Eskimo Dog", "Estimate" : "Hinnanguline", "Euthanized" : "", "Euthanized {0}" : "", "Every day" : "", "Exclude animals who are aged under" : "", "Exclude from bulk email" : "", "Exclude new animal photos from publishing" : "", "Exclude this image when publishing" : "", "Execute" : "", "Execute Script" : "", "Execute the SQL in the box below" : "", "Executing Task" : "Teosta päeviku ülesanne", "Executing..." : "", "Exotic Shorthair" : "Exotic Shorthair", "Expense" : "Kulu", "Expense::" : "Kulu", "Expenses::Board" : "", "Expenses::Electricity" : "", "Expenses::Food" : "", "Expenses::Gas" : "", "Expenses::Phone" : "", "Expenses::Postage" : "", "Expenses::Stationary" : "", "Expenses::Water" : "", "Expire in next month" : "", "Expired" : "", "Expired in the last month" : "", "Expired in the last week" : "", "Expires" : "", "Expiry" : "", "Expiry date" : "", "Export" : "", "Export Animals as CSV" : "", "Export Report" : "", "Export Reports as CSV" : "", "Export a CSV file of animal records that ASM can import into another database." 
: "", "Export this database in various formats" : "", "Exporting the complete database can take some time and generate a very large file, are you sure?" : "", "Extra Images" : "", "Extra images" : "", "Extra-Toes Cat (Hemingway Polydactyl)" : "Extra-Toes Cat (Hemingway Polydactyl)", "F (Feral Cat)" : "", "FECV/FeCoV" : "", "FIPV" : "", "FIV" : "", "FIV Result" : "FIV testi tulemus", "FIV+" : "", "FIV/L Test Date" : "", "FIV/L Tested" : "", "FLV" : "", "FLV Result" : "FIV testi tulemus", "FLV+" : "", "FTP hostname" : "", "FTP password" : "", "FTP username" : "", "FVRCP" : "", "Facebook" : "", "Failed sending email" : "", "Failed to create payment." : "", "Failed to renew license." : "", "Fawn" : "", "Fawn Tortoiseshell" : "", "FeLV" : "", "Features" : "Omadused", "Feb" : "veebruar", "February" : "veebruar", "Fee" : "", "Female" : "Emane", "Feral" : "Metsik", "Ferret" : "Tuhkur", "Field Spaniel" : "Field Spaniel", "Field names should not contain spaces." : "", "Fila Brasileiro" : "Fila Brasileiro", "File" : "", "Filter" : "", "Financial" : "", "Finch" : "Vint, leevike", "Find Animal" : "Leia loom", "Find Animal/Person" : "", "Find Found Animal" : "Leia leitud loom", "Find Incident" : "", "Find Lost Animal" : "Leia kadunud loom", "Find Person" : "", "Find a found animal" : "Leia leitud loom", "Find a lost animal" : "Leia kadunud loom", "Find aco" : "", "Find an incident" : "", "Find animal" : "Leia loom", "Find animal columns" : "", "Find animal control incidents returned {0} results." : "", "Find animals matching the looking for criteria of this person" : "", "Find donor" : "", "Find driver" : "", "Find fosterer" : "", "Find found animal returned {0} results." : "", "Find homechecked" : "", "Find homechecker" : "", "Find incident" : "", "Find lost animal returned {0} results." : "", "Find member" : "", "Find person" : "", "Find person columns" : "", "Find retailer" : "", "Find shelter" : "", "Find staff" : "", "Find staff/volunteer" : "", "Find this address on a map" : "", "Find vet" : "", "Find volunteer" : "", "Fine Amount" : "", "Finnish Lapphund" : "Finnish Lapphund", "Finnish Spitz" : "Finnish Spitz", "First Last" : "", "First Names" : "", "First name(s)" : "", "First offence" : "", "Fish" : "Kala", "Flag" : "", "Flags" : "", "Flat-coated Retriever" : "Flat-coated Retriever", "Flemish Giant" : "Flemish Giant", "Florida White" : "Florida White", "Followup" : "", "Followup Between" : "", "Followup Date/Time" : "", "Footer" : "Kasupereks hakkamine", "For" : "Kellele", "Forbidden" : "", "Forenames" : "", "Forget" : "", "Form URL" : "", "Forms need a name." : "", "Foster" : "Kasupereks hakkamine", "Foster Book" : "Hoiukodude raamat", "Foster Capacity" : "", "Foster Transfer" : "Üleviimise kuupäev", "Foster an animal" : "", "Foster book" : "Hoiukodude raamat", "Foster movements must have a valid foster date." : "", "Foster successfully created." : "", "Fostered" : "Kasuperesse antud", "Fostered Animals" : "Pakub hoiukodu", "Fostered to {0} since {1}" : "", "Fosterer" : "Kasuperesse antud", "Fosterer (Active Only)" : "", "Fosterer Medical Report" : "Tagasitulnud loomade aruanne", "Found" : "Hound", "Found Animal" : "Leitud loom", "Found Animal - Additional" : "", "Found Animal - Details" : "", "Found Animal Contact" : "Leitud looma kontakt", "Found Animal {0}" : "Leitud loom: {0}", "Found Animal: {0}" : "Leitud loom: {0}", "Found animal - {0} {1} [{2}]" : "", "Found animal entries matching '{0}'." : "", "Found animals must have a contact" : "", "Found animals reported in the last 30 days." 
: "", "Found from" : "", "Found to" : "", "FoundLost animal entry {0} successfully created." : "", "Fox Terrier" : "Fox Terrier", "Foxhound" : "Foxhound", "Fr" : "", "French Bulldog" : "French Bulldog", "French-Lop" : "French-Lop", "Frequency" : "Sagedus", "Frequently Asked Questions" : "", "Fri" : "", "Friday" : "", "From" : "", "From Fostering" : "Kasuperest", "From Other" : "Teisest", "From retailer is only valid on adoption movements." : "", "Future notes" : "", "GDPR Contact Opt-In" : "", "Gaited" : "Gaited", "Gas Bills" : "", "Gecko" : "Geko", "General" : "", "Generate" : "", "Generate Documents" : "", "Generate HTML from this SQL" : "", "Generate Report" : "", "Generate a document from this animal" : "", "Generate a document from this incident" : "", "Generate a document from this movement" : "Loo selle liikumise kohta dokument", "Generate a document from this person" : "", "Generate a document from this record" : "Loo sellele omanikule dokument", "Generate a javascript database for the search page" : "", "Generate a new animal code" : "Loo uus looma kood", "Generate a random name for this animal" : "Loo loomale suvaline nimi", "Generate document from this appointment" : "Loo selle liikumise kohta dokument", "Generate document from this license" : "", "Generate document from this payment" : "", "Generate document from this transport" : "Loo sellele omanikule dokument", "Generate documentation" : "", "Generate documents" : "", "Generate image thumbnails as tn_$$IMAGE$$" : "", "Generated document '{0}'" : "", "Gerbil" : "Liivahiir", "German Pinscher" : "German Pinscher", "German Shepherd Dog" : "German Shepherd Dog", "German Shorthaired Pointer" : "German Shorthaired Pointer", "German Wirehaired Pointer" : "German Wirehaired Pointer", "Get more reports from sheltermanager.com" : "", "Gift Aid" : "", "GiftAid" : "", "Giftaid" : "", "Ginger" : "Punakaspruun", "Ginger and White" : "Punakaspruun ja valge", "Give" : "Manustatud", "Give Treatments" : "", "Give Vaccination" : "Uus vaktsineerimine", "Given" : "Manustatud", "Glen of Imaal Terrier" : "Glen of Imaal Terrier", "Go" : "", "Go the lookup data screen and add/remove breeds, species and animal types according to the animals your shelter deals with." : "", "Go the options screen and set your shelter's contact details and other settings." : "", "Go the system users screen and add user accounts for your staff." 
: "", "Goat" : "Kits", "Golden" : "", "Golden Retriever" : "Golden Retriever", "Goldfish" : "Kuldkala", "Good With Cats" : "Sobib kassidega", "Good With Children" : "Sobib lastega", "Good With Dogs" : "Sobib koertega", "Good with Cats" : "Sobib kassidega", "Good with Children" : "Sobib lastega", "Good with Dogs" : "Sobib koertega", "Good with cats" : "Sobib kassidega", "Good with children" : "Sobib lastega", "Good with dogs" : "Sobib koertega", "Good with kids" : "Sobib kassidega", "Google+" : "", "Goose" : "Hani", "Gordon Setter" : "Gordon Setter", "Grade" : "Grade", "Great Dane" : "Great Dane", "Great Pyrenees" : "Great Pyrenees", "Greater Swiss Mountain Dog" : "Greater Swiss Mountain Dog", "Green" : "Roheline", "Grey" : "Hall", "Grey and White" : "Hall ja valge", "Greyhound" : "Greyhound", "Guinea Pig" : "Merisiga", "Guinea fowl" : "Pärlkana", "HMRC Gift Aid Spreadsheet" : "", "HTML" : "", "HTML Publishing Templates" : "", "HTML/FTP Publisher" : "", "Hairless" : "Karvutu", "Half-Yearly" : "Poole aasta tagant", "Hamster" : "Hamster", "Harlequin" : "Harlequin", "Havana" : "Havana", "Havanese" : "Havanese", "Header" : "", "Health Problems" : "", "Health and Identification" : "", "Healthy" : "Terve", "Heartworm" : "", "Heartworm Test Date" : "", "Heartworm Test Result" : "", "Heartworm Tested" : "", "Heartworm+" : "", "Hedgehog" : "Siil", "Held" : "", "Help" : "", "Hepatitis" : "", "Here are some things you should do before you start adding animals and people to your database." : "", "Hidden" : "", "Hidden Comments" : "", "Hidden comments about the animal" : "", "Hide deceased animals from the home page" : "", "High" : "Kõrge", "Highlight" : "", "Himalayan" : "Himalayan", "History" : "Ajalugu", "Hold" : "", "Hold the animal until this date or blank to hold indefinitely" : "", "Hold until" : "", "Hold until {0}" : "", "Holland Lop" : "Holland Lop", "Home" : "", "Home Phone" : "", "Home page" : "", "Homecheck Areas" : "", "Homecheck Date" : "", "Homecheck History" : "", "Homecheck areas" : "", "Homechecked" : "Kodu on külastatud", "Homechecked By" : "Kodu on külastatud", "Homechecked by" : "Kodu on külastatud", "Homechecker" : "Kodude külastaja", "Horizontal Pitch" : "", "Horse" : "Hobune", "Hotot" : "Hotot", "Hound" : "Hound", "Hours" : "", "Housetrained" : "Puhtust pidav", "Hovawart" : "Hovawart", "How urgent is it that we take this animal?" : "", "Husky" : "Husky", "I've finished, Don't show me this popup again." : "", "IP Restriction" : "", "IP restriction is a space-separated list of IP netblocks in CIDR notation that this user is *only* permitted to login from (eg: 192.168.0.0/24 127.0.0.0/8). If left blank, the user can login from any address." : "", "Ibizan Hound" : "Ibizan Hound", "If the shelter provides initial insurance cover to new adopters, the policy number" : "", "If this form has a populated emailaddress field during submission, send a confirmation email to it" : "", "If this is the web preferred image, web publishers will use these notes as the animal description" : "", "If this person is a fosterer, the maximum number of animals they can care for." : "", "If this person is a member, the date that membership expires." : "", "If this person is a member, their membership number" : "Kui see omanik on liige, siis liikmenumber", "If this person is a member, their membership number." 
: "", "If this stock record is for a drug, the batch number from the container" : "", "If this stock record is for a perishable good, the expiry date on the container" : "", "If you assign view or edit roles, only users within those roles will be able to view and edit this account." : "", "If you don't select any locations, publishers will include animals in all locations." : "", "Iguana" : "Iguaan", "Illyrian Sheepdog" : "Illyrian Sheepdog", "Image" : "", "Image file" : "", "Import" : "", "Import a CSV file" : "", "Import a PayPal CSV file" : "", "Import from file" : "", "Important" : "", "In" : "", "In SubTotal" : "", "In the last month" : "", "In the last quarter" : "", "In the last week" : "", "In the last year" : "", "In-Kind Donation" : "Muuda annetust", "Inactive" : "", "Inactive - do not include" : "", "Incident" : "", "Incident - Additional" : "", "Incident - Citation" : "", "Incident - Details" : "", "Incident - Dispatch" : "", "Incident - Owner" : "", "Incident Between" : "", "Incident Completed Types" : "", "Incident Date/Time" : "", "Incident Type" : "", "Incident Types" : "", "Incident date cannot be blank" : "", "Incident followup" : "", "Incident {0} successfully created." : "", "Incident {0}, {1}: {2}" : "", "Incidents" : "", "Incidents Requiring Followup" : "", "Include CSV header line" : "", "Include Removed" : "", "Include animals in the following locations" : "", "Include animals on trial adoption" : "", "Include animals who don't have a description" : "Lisa loomad, kes on antud omanikele tagasi", "Include animals who don't have a picture" : "", "Include cruelty case animals" : "", "Include deceased animals" : "Lisa tagastatud loomad", "Include fostered animals" : "", "Include found" : "", "Include held animals" : "", "Include incomplete medical records when generating document templates" : "", "Include incomplete vaccination and test records when generating document templates" : "", "Include non-shelter animals" : "Lisan varjupaigaloomad?", "Include off-shelter animals in medical calendar and books" : "", "Include preferred photo" : "Lisa lahkunud", "Include quarantined animals" : "", "Include reserved animals" : "", "Include retailer animals" : "", "Include returned" : "", "Include this image when publishing" : "", "Include unaltered animals" : "Lisa tagastatud loomad", "Income" : "Tulu", "Income from an on-site shop" : "", "Income::" : "Tulu", "Income::Adoption" : "", "Income::Donation" : "", "Income::EntryDonation" : "", "Income::Interest" : "", "Income::OpeningBalances" : "", "Income::Shop" : "", "Income::Sponsorship" : "", "Income::WaitingList" : "", "Incoming" : "Sissetulev", "Incoming Forms" : "", "Incoming donations (misc)" : "", "Incoming forms are online forms that have been completed and submitted by people on the web." : "", "Incomplete incidents" : "", "Incomplete notes upto today" : "", "Index" : "", "Individual/Couple" : "", "Induct a new animal" : "", "Information" : "Info", "Initials" : "", "Install" : "", "Install the selected reports to your database" : "", "Insurance" : "", "Insurance No" : "", "Intake" : "", "Intakes {0}" : "", "Internal Location" : "Sisemine asukoht", "Internal Locations" : "Sisemine asukoht", "Invalid email address" : "", "Invalid email address '{0}'" : "", "Invalid microchip number length" : "", "Invalid time '{0}', times should be in 00:00 format" : "", "Invalid time, times should be in HH:MM format" : "", "Invalid username or password." 
: "", "Investigation" : "", "Investigations" : "", "Investigator" : "", "Invoice Only" : "", "Invoice items need a description and amount." : "", "Irish Setter" : "Irish Setter", "Irish Terrier" : "Irish Terrier", "Irish Water Spaniel" : "Irish Water Spaniel", "Irish Wolfhound" : "Irish Wolfhound", "Is this a permanent foster?" : "", "Is this a trial adoption?" : "", "Issue a new insurance number for this animal/adoption" : "", "Issue date and expiry date must be valid dates." : "", "Issued" : "", "Issued in the last month" : "", "Issued in the last week" : "", "Italian Greyhound" : "Italian Greyhound", "Italian Spinone" : "Italian Spinone", "Item" : "", "Jack Russell Terrier" : "Jack Russell Terrier", "Jan" : "jaanuar", "January" : "jaanuar", "Japanese Bobtail" : "Japanese Bobtail", "Japanese Chin" : "Japanese Chin", "Javanese" : "Javanese", "Jersey Wooly" : "Jersey Wooly", "Jindo" : "Jindo", "Jul" : "juuli", "July" : "juuli", "Jump to diary" : "", "Jump to donations" : "", "Jump to media" : "", "Jump to movements" : "", "Jun" : "juuni", "June" : "juuni", "Jurisdiction" : "Kestus", "Jurisdictions" : "", "Kai Dog" : "Kai Dog", "Kakariki" : "Kakariki", "Karelian Bear Dog" : "Karelian Bear Dog", "Keep table headers visible when scrolling" : "", "Keeshond" : "Keeshond", "Kennel" : "", "Kerry Blue Terrier" : "Kerry Blue Terrier", "Kishu" : "Kishu", "Kittens (under {0} months)" : "", "Km" : "", "Komondor" : "Komondor", "Korat" : "Korat", "Kuvasz" : "Kuvasz", "Kyi Leo" : "Kyi Leo", "Label" : "", "Labrador Retriever" : "Labrador Retriever", "Lakeland Terrier" : "Lakeland Terrier", "Lancashire Heeler" : "Lancashire Heeler", "Large" : "Suur", "Last First" : "", "Last Location" : "", "Last Month" : "", "Last Name" : "", "Last Week" : "", "Last changed by {0} on {1}" : "", "Last name" : "", "Last, First" : "", "Latency" : "", "Latency Tester" : "", "Least recently changed" : "", "Leave" : "", "Leave of absence" : "", "Left Margin" : "", "Left shelter" : "", "Leonberger" : "Leonberger", "Leptospirosis" : "", "Letter" : "Setter", "Lhasa Apso" : "Lhasa Apso", "Liability" : "Kohustus", "Licence for {0} successfully renewed {1} - {2}" : "", "License" : "", "License Number" : "", "License Types" : "", "License number '{0}' has already been issued." : "", "License numbers matching '{0}'." : "", "License requires a number" : "", "License requires a person" : "", "License requires issued and expiry dates" : "", "Licenses" : "", "Licensing" : "", "Lifetime" : "", "Light Amber" : "", "Lilac" : "Lilac", "Lilac Tortie" : "", "Limited to {0} matches" : "", "Link" : "Link", "Link an animal" : "", "Link to an external web resource" : "", "Link to this animal" : "", "Links" : "Lingid", "List" : "", "Litter" : "Pesakonnad", "Litter Ref" : "", "Litter Reference" : "", "Littermates" : "", "Litters" : "Pesakonnad", "Litters need at least a required date and number." : "", "Live Releases {0}" : "Välja laskmise kuupäev", "Liver" : "Maksavärvi", "Liver and White" : "Maksavärvi ja valge", "Lizard" : "Sisalik", "Llama" : "Laama", "Loading..." : "", "Loan" : "Laen", "Local" : "Kohalik fail:", "Locale" : "", "Location" : "Asukoht", "Location Filter" : "", "Location and Species" : "", "Location and Type" : "", "Location and Unit" : "", "Locations" : "Asukoht", "Log" : "Logi", "Log Text" : "", "Log Type" : "", "Log Types" : "", "Log date must be a valid date" : "", "Log entries need a date and text." : "", "Log requires a date." : "", "Log requires a person." : "", "Log requires an animal." : "", "Log successfully added." 
: "", "Login" : "Logi sisse", "Logout" : "Logi välja", "Long" : "Pikk", "Long term" : "", "Longest On Shelter" : "", "Looking For" : "", "Looking for" : "", "Lookup" : "Päring", "Lookup (Multiple Select)" : "", "Lookup Values" : "", "Lookup data" : "", "Lookups" : "Päring", "Lop Eared" : "Lop Eared", "Lory/Lorikeet" : "Lory/Lorikeet", "Lost" : "Kulud", "Lost Animal" : "Kaotatud loom", "Lost Animal - Additional" : "", "Lost Animal - Details" : "", "Lost Animal Contact" : "Kaotatud looma kontakt", "Lost Animal: {0}" : "Kaotatud loom: {0}", "Lost and Found" : "", "Lost and found entries must have a contact" : "", "Lost animal - {0} {1} [{2}]" : "", "Lost animal entries matching '{0}'." : "", "Lost animal entry {0} successfully created." : "", "Lost animals must have a contact" : "", "Lost animals reported in the last 30 days." : "", "Lost from" : "", "Lost to" : "", "Lost/Found" : "", "Lots of reports installed? Clean up the Reports menu with Settings-Options- Display-Show report menu items in collapsed categories." : "", "Lovebird" : "Lembelind", "Low" : "Madal", "Lowchen" : "", "Lowest" : "Madalaim", "M (Miscellaneous)" : "M (Muu)", "MM = current month" : "", "Macaw" : "Aara", "Mail" : "", "Mail Merge" : "Kirjakooste", "Mail Merge - {0}" : "", "Maine Coon" : "Maine Coon", "Make this the default image when creating documents" : "", "Make this the default image when viewing this record and publishing to the web" : "", "Make this the default video link when publishing to the web" : "", "Male" : "Isane", "Maltese" : "Maltese", "Manchester Terrier" : "Manchester Terrier", "Mandatory" : "", "Manual" : "", "Manually enter codes (do not generate)" : "", "Manufacturer" : "", "Manx" : "Manx", "Map" : "", "Map of active incidents" : "", "Mar" : "märts", "March" : "märts", "Maremma Sheepdog" : "Maremma Sheepdog", "Mark Deceased" : "", "Mark an animal deceased" : "", "Mark dispatched now" : "", "Mark new animals as not for adoption" : "Märgi uued loomad kui mitteloovutatavad", "Mark responded now" : "", "Mark selected payments received" : "", "Mark this owner homechecked" : "", "Mark treatments given" : "", "Marketer" : "", "Markings" : "Märgistus", "Markup" : "", "Marriage/Relationship split" : "Abielu/kooselu lahutus", "Mastiff" : "Mastiff", "Match" : "märts", "Match Lost and Found" : "", "Match against other lost/found animals" : "", "Match lost and found animals" : "", "Match this animal with the lost and found database" : "", "Maternity" : "", "May" : "mai", "McNab" : "McNab", "Media" : "Meedia", "Media Notes" : "", "Media notes contain" : "", "Medical" : "Meditsiiniline", "Medical Book" : "Meditsiiniline raamat", "Medical Profiles" : "Meditsiiniprofiilid", "Medical book" : "Meditsiiniline raamat", "Medical calendar" : "", "Medical profiles" : "Meditsiiniprofiilid", "Medical profiles need a profile name, treatment, dosage and frequencies." : "", "Medical regimens need an animal, name, dosage, a start date and frequencies." 
: "", "Medicate" : "Meditsiiniline", "Medicate Animal" : "", "Medium" : "Keskmine", "Member" : "Liige", "Membership Expiry" : "", "Membership Number" : "", "Merge" : "", "Merge Person" : "", "Merge another animal into this one" : "Liida teise omaniku andmed sellega", "Merge another person into this one" : "", "Merge bonded animals into a single record" : "", "Merge duplicate records" : "", "Message" : "", "Message Board" : "", "Message from {0}" : "", "Message successfully sent to {0}" : "", "Messages" : "", "Messages successfully sent" : "", "Method" : "", "Microchip" : "", "Microchip Date" : "", "Microchip Number" : "Mikrokiibi number", "Microchip number {0} has already been allocated to another animal." : "", "Microchipped" : "", "Miles" : "", "Mini Rex" : "Mini Rex", "Mini-Lop" : "Mini-Lop", "Miniature Pinscher" : "Miniature Pinscher", "Minutes" : "", "Missouri Foxtrotter" : "Missouri Foxtrotter", "Mixed Breed" : "Tõug", "Mo" : "", "Mobile signing pad" : "", "Modify Additional Fields" : "", "Modify Document Templates" : "", "Modify Lookups" : "", "Mon" : "", "Monday" : "", "Money" : "Raha", "Month" : "", "Monthly" : "Igakuine", "More Info Needed" : "", "More Medications" : "", "More Tests" : "", "More Vaccinations" : "Uus vaktsineerimine", "More diary notes" : "Muuda mu päeviku märkmeid", "Morgan" : "Morgan", "Most browsers let you search in dropdowns by typing the first few letters of the item you want." : "", "Most browsers will let you visit a record you have been to in this session by typing part of its name in the address bar." : "", "Most recently changed" : "", "Most relevant" : "", "Mother" : "", "Mountain Cur" : "Mountain Cur", "Mountain Dog" : "Mountain Dog", "Mouse" : "Hiir", "Move" : "Tuvi", "Move an animal to a retailer" : "", "Moved to animal record {0}" : "", "Movement" : "Liikumine", "Movement Date" : "Liikumise tüüp", "Movement Number" : "", "Movement Type" : "Liikumise tüüp", "Movement Types" : "Liikumise tüüp", "Movement dates clash with an existing movement." : "", "Movement numbers must be unique." : "", "Movements" : "Liikumised", "Movements require an animal" : "", "Movements require an animal." : "", "Moving..." 
: "", "Multi-Lookup" : "", "Multiple Treatments" : "Mitmekordne ravi", "Munchkin" : "Munchkin", "Munsterlander" : "Munsterlander", "Mustang" : "Mustang", "My Fosters" : "Kasupereks hakkamine", "My Incidents" : "", "My Undispatched Incidents" : "", "My diary notes" : "Muuda mu päeviku märkmeid", "My sheltermanager.com account" : "", "Mynah" : "Acridotheres maina lind", "N (Non-Shelter Animal)" : "Mitte-varjupaiga loom.", "NNN or NN = number unique for this type of animal for this year" : "", "Name" : "Nimi", "Name Contains" : "", "Name and Address" : "", "Name cannot be blank" : "", "Name contains" : "", "Neapolitan Mastiff" : "Neapolitan Mastiff", "Negative" : "Negatiivne", "Neglect" : "", "Netherland Dwarf" : "Netherland Dwarf", "Neuter/Spay" : "Steriliseerima/kastreerima", "Neutered" : "Kastreeritud", "Neutered/Spayed Non-Shelter Animals In {0}" : "Üksikasjalik varjupaiga loomade loend {0}", "Neutered/Spayed Shelter Animals In {0}" : "Üksikasjalik varjupaiga loomade loend {0}", "New" : "Uus", "New Account" : "", "New Appointment" : "Uus dieet", "New Citation" : "", "New Cost" : "Uus maksumus", "New Diary" : "", "New Diet" : "Uus dieet", "New Document" : "", "New Field" : "", "New Fosterer" : "", "New Guinea Singing Dog" : "New Guinea Singing Dog", "New Item" : "UUs pesakond", "New License" : "", "New Litter" : "", "New Log" : "", "New Movement" : "", "New Owner" : "", "New Password" : "", "New Payment" : "", "New Profile" : "", "New Record" : "Uus parool:", "New Regimen" : "", "New Report" : "", "New Role" : "", "New Stock" : "", "New Task" : "Uus maksumus", "New Template" : "", "New Test" : "Uus maksumus", "New Transport" : "", "New Trap Loan" : "", "New User" : "", "New Vaccination" : "Uus vaktsineerimine", "New Voucher" : "Uus vautšer", "New Waiting List Entry" : "Uus ootenimekirja sissekanne", "New Zealand" : "New Zealand", "New diary task" : "Muuda päeviku ülesandeid", "New form field" : "", "New name" : "", "New online form" : "", "New password and confirmation password don't match." : "", "New task detail" : "Uus ülesande detail", "New template" : "", "Newfoundland Dog" : "Newfoundland Dog", "Next" : "Tekst", "No" : "Ei", "No adjustment" : "", "No data to show on the report." : "", "No data." : "", "No description" : "Kirjeldus", "No longer retained" : "", "No matches found." : "Vastavusi ei leitud.", "No picture" : "", "No publishers are running." : "", "No results found." : "", "No results." : "", "No tasks are running." 
: "", "No view permission for this report" : "", "Noise" : "", "Non-Shelter" : "Mitte-varjupaiga", "Non-Shelter Animal" : "Mitte-varjupaiga loom.", "Non-Shelter Animals" : "Mitte-varjupaiga loom.", "Non-shelter Animals" : "Mitte-varjupaiga loom.", "None" : "Puudub", "Norfolk Terrier" : "Norfolk Terrier", "Normal user" : "", "Norwegian Buhund" : "Norwegian Buhund", "Norwegian Elkhound" : "Norwegian Elkhound", "Norwegian Forest Cat" : "Norwegian Forest Cat", "Norwegian Lundehund" : "Norwegian Lundehund", "Norwich Terrier" : "Norwich Terrier", "Not Arrived" : "", "Not Available For Adoption" : "", "Not Available for Adoption" : "", "Not For Adoption" : "Loom ei ole loovutamiseks", "Not Microchipped" : "", "Not Reconciled" : "", "Not available for adoption" : "", "Not dispatched" : "", "Not for adoption" : "Loom ei ole loovutamiseks", "Not for adoption flag set" : "", "Not in chosen publisher location" : "", "Not reconciled" : "", "Note" : "Märkus", "Notes" : "Märkmed", "Notes about the death of the animal" : "", "Nov" : "november", "Nova Scotia Duck-Tolling Retriever" : "Nova Scotia Duck-Tolling Retriever", "November" : "november", "Now" : "", "Number" : "Number", "Number in litter" : "", "Number of Tasks" : "", "Number of animal links to show" : "", "Number of fields" : "", "Number of pets" : "", "Ocicat" : "Ocicat", "Oct" : "oktoober", "October" : "oktoober", "Office" : "", "Old English Sheepdog" : "Old English Sheepdog", "Old Password" : "", "Omit criteria" : "", "Omit header/footer" : "", "On Foster (in figures)" : "Kasuperes (arvudes)", "On Shelter" : "Varjupaigas", "On shelter for {0} days, daily cost {1}, cost record total <b>{2}</b>" : "", "On shelter for {0} days. Total cost: {1}" : "Varjupaigas {0} päeva. Maksumus kokku: {1}", "Once assigned, codes cannot be changed" : "", "Once signed, this document cannot be edited or tampered with." : "", "One Off" : "One-Off", "One-Off" : "One-Off", "Online Form: {0}" : "", "Online Forms" : "", "Online form fields need a name and label." : "", "Online forms can be linked to from your website and used to take information from visitors for applications, etc." : "", "Only PDF, HTML and JPG image files can be attached." 
: "", "Only active accounts" : "", "Only allow users with one of these roles to view this incident" : "", "Only show account totals for the current period, which starts on " : "", "Only show declawed" : "", "Only show pickups" : "", "Only show special needs" : "", "Only show transfers" : "", "Open Incidents" : "", "Open records in a new browser tab" : "", "Open reports in a new browser tab" : "", "Opening balances" : "", "Optional, the date the vaccination \"wears off\" and needs to be administered again" : "", "Options" : "Valikud", "Or move this diary on to" : "", "Order published animals by" : "", "Organisation" : "Organisatsioon", "Organization" : "Organisatsioon", "Organization name" : "", "Oriental Long Hair" : "Oriental Long Hair", "Oriental Short Hair" : "Oriental Short Hair", "Oriental Tabby" : "Oriental Tabby", "Original Owner" : "Algne omanik", "Ostrich" : "Jaanalind", "Other Account" : "", "Other Organisation" : "", "Other Shelter" : "", "Otterhound" : "Otterhound", "Our shelter does trial adoptions, allow us to mark these on movement screens" : "", "Out" : "", "Out Between" : "", "Out SubTotal" : "", "Output a deceased animals page" : "", "Output a page with links to available online forms" : "", "Output a separate page for each animal type" : "", "Output a separate page for each species" : "", "Output an adopted animals page" : "", "Output an rss.xml page" : "", "Overdue" : "", "Overdue medical items" : "", "Overtime" : "", "Owl" : "Kakk", "Owner" : "Omanik", "Owner Vet" : "", "Owner given citation" : "", "Owners Vet" : "", "PM" : "", "Page extension" : "", "Paid" : "", "Paint/Pinto" : "Paint/Pinto", "Palomino" : "Palomino", "Paper Size" : "", "Papillon" : "Papillon", "Parainfluenza" : "", "Parakeet (Other)" : "(Väike pika sabaga) papagoi", "Parent" : "Vanem", "Parrot (Other)" : "Papagoi (muu)", "Parrotlet" : "Parrotlet", "Parvovirus" : "Parvoviirus", "Paso Fino" : "Paso Fino", "Pass Homecheck" : "", "Password" : "", "Password for '{0}' has been reset." : "", "Password is incorrect." : "", "Password successfully changed." : "", "Passwords cannot be blank." : "", "Path" : "", "Patterdale Terrier (Fell Terrier)" : "Patterdale Terrier (Fell Terrier)", "PayPal" : "", "Payment" : "", "Payment Book" : "", "Payment From" : "", "Payment Methods" : "", "Payment Type" : "", "Payment Types" : "", "Payment book" : "", "Payment calendar" : "", "Payment of {0} successfully received ({1})." : "", "Payments" : "", "Payments need at least one date, an amount and a person." : "", "Payments of type" : "", "Payments require a person" : "", "Payments require a received date" : "", "Peacock/Pea fowl" : "Paabulind", "Pekingese" : "Pekingese", "Pending Adoption" : "", "Pending Apartment Verification" : "", "Pending Home Visit" : "", "Pending Vet Check" : "", "Pension" : "Pension", "People" : "", "People Looking For" : "", "People matching '{0}'." : "", "People or animal records that already exist in the database will not be imported again and movement/payment data will be attached to the existing records instead." : "", "People with active reservations, but no homecheck has been done." : "", "People with overdue donations." 
: "", "Percheron" : "Percheron", "Perform" : "", "Perform Homecheck" : "", "Perform Test" : "", "Performed" : "", "Permanent Foster" : "", "Persian" : "Pärsia", "Person" : "", "Person - Additional" : "", "Person - Name and Address" : "", "Person - Type" : "", "Person Flags" : "", "Person looking for report" : "", "Person successfully created" : "", "Personal" : "", "Peruvian Inca Orchid" : "Peruvian Inca Orchid", "Peruvian Paso" : "Peruvian Paso", "Petit Basset Griffon Vendeen" : "Petit Basset Griffon Vendeen", "Pharaoh Hound" : "Pharaoh Hound", "Pheasant" : "Faasan", "Phone" : "Telefon", "Phone contains" : "Linn sisaldab:", "Photo successfully uploaded." : "", "Picked Up" : "", "Picked Up By" : "", "Pickup" : "", "Pickup Address" : "", "Pickup Location" : "", "Pickup Locations" : "", "Pig" : "Siga", "Pig (Farm)" : "Kodusiga", "Pigeon" : "Tuvi", "Pinterest" : "", "Pionus" : "Pionus", "Pit Bull Terrier" : "Pit Bull Terrier", "Pixie-Bob" : "Pixie-Bob", "Please click the Sign button when you are finished." : "", "Please see the manual for more information." : "", "Please select a PDF, HTML or JPG image file to attach" : "", "Please tighten the scope of your email campaign to {0} emails or less." : "", "Please use the links below to electronically sign these documents." : "", "Plott Hound" : "Plott Hound", "Poicephalus/Senegal" : "Poicephalus/Senegal (turteltuvi)", "Pointer" : "Pointer", "Points for being found within 2 weeks of being lost" : "", "Points for matching age group" : "", "Points for matching breed" : "", "Points for matching color" : "", "Points for matching features" : "", "Points for matching lost/found area" : "", "Points for matching sex" : "", "Points for matching species" : "", "Points for matching zipcode" : "", "Points required to appear on match report" : "", "Polish" : "Polish", "Polish Lowland Sheepdog" : "Polish Lowland Sheepdog", "Pomeranian" : "Pomeranian", "Pony" : "Pony", "Poodle" : "Poodle", "Portugese Podengo" : "Portugese Podengo", "Portuguese Water Dog" : "Portuguese Water Dog", "Positive" : "Positiivne", "Positive for Heartworm, FIV or FLV" : "", "Positive/Negative" : "", "Post" : "Indeks", "Postage costs" : "", "Pot Bellied" : "Pot Bellied (vatsakas)", "Prairie Dog" : "Rohtlahaukur", "Prefill new media notes for animal images with animal comments if left blank" : "", "Prefill new media notes with the filename if left blank" : "", "Premises" : "", "Presa Canario" : "Presa Canario", "Press F11 in HTML or SQL code editing boxes to edit in fullscreen mode" : "", "Preview" : "Eelvaade", "Previous" : "", "Previous Adopter" : "Potentsiaalsed adopteerijad", "Print" : "Prindi", "Print Preview" : "", "Print selected forms" : "", "Printable Manual" : "", "Printing word processor documents uses hidden iframe and window.print" : "", "Priority" : "", "Priority Floor" : "", "Produce a CSV File" : "", "Produce a PDF of printable labels" : "", "Profile" : "", "Profile name cannot be blank" : "", "Public Holiday" : "", "Publish Animals to the Internet" : "", "Publish HTML via FTP" : "", "Publish now" : "", "Publish to folder" : "", "Published to Website" : "", "Publisher" : "", "Publisher Breed" : "", "Publisher Color" : "", "Publisher Logs" : "", "Publisher Species" : "", "Publishing" : "", "Publishing History" : "", "Publishing Logs" : "", "Publishing Options" : "", "Publishing complete." 
: "", "Publishing template" : "", "Pug" : "Pug (Mops)", "Puli" : "Puli", "Pumi" : "Pumi", "Puppies (under {0} months)" : "", "Purchased" : "", "Qty" : "", "Quaker Parakeet" : "Quaker Parakeet", "Quantity" : "", "Quarantine" : "", "Quarterhorse" : "Quarterhorse", "Quarterly" : "Kord kvartalis", "Quick Links" : "", "Quicklinks" : "", "Quicklinks are shown on the home page and allow quick access to areas of the system." : "", "R" : "", "Rabbit" : "Jänes", "Rabies" : "", "Rabies Tag" : "", "RabiesTag" : "", "Radio Buttons" : "", "Ragamuffin" : "Ragamuffin", "Ragdoll" : "Ragdoll", "Rank" : "Koht", "Rat" : "Rott", "Rat Terrier" : "Rat Terrier", "Raw Markup" : "", "Read the manual for more information about Animal Shelter Manager." : "", "Real name" : "", "Reason" : "Põhjus", "Reason For Appointment" : "Tagastamise põhjus", "Reason Not From Owner" : "", "Reason for Entry" : "", "Reason for entry" : "", "Reason not from Owner" : "", "Reason the owner did not bring in the animal themselves" : "", "Recalculate ALL animal ages/times" : "", "Recalculate ALL animal locations" : "", "Recalculate on-shelter animal locations" : "", "Receipt No" : "", "Receipt/Invoice" : "", "Receive" : "Broneeritud", "Receive a donation" : "", "Receive a payment" : "", "Received" : "Broneeritud", "Received in last day" : "", "Received in last month" : "", "Received in last week" : "", "Received in last year" : "", "Received today" : "", "Recently Adopted" : "", "Recently Changed" : "", "Recently Entered Shelter" : "", "Recently Fostered" : "", "Recently deceased" : "", "Recently deceased shelter animals (last 30 days)." : "", "Reception" : "", "Reclaim" : "Tagastatud", "Reclaim an animal" : "", "Reclaim movements must have a valid reclaim date." : "", "Reclaim successfully created." 
: "", "Reclaimed" : "Tagastatud", "Reconcile" : "", "Reconciled" : "", "Redbone Coonhound" : "Redbone Coonhound", "Rediarised" : "", "Redirect to URL after POST" : "", "Reference" : "Viide", "Refresh" : "Värskenda", "Regenerate 'Match lost and found animals' report" : "", "Regenerate 'Person looking for' report" : "", "Regenerate annual animal figures for" : "", "Regenerate monthly animal figures for" : "", "Regenerate person names in selected format" : "", "Register Microchip" : "", "Register microchips after" : "", "Released To Wild" : "Vabadusse lastud", "Released To Wild {0}" : "Vabadusse lastud", "Reload" : "", "Remaining" : "Järele jäänud", "Remember me on this computer" : "", "Removal" : "Eemaldamine", "Removal Reason" : "", "Removal reason" : "", "Remove" : "", "Remove HTML and PDF document media after this many years" : "", "Remove clinic functionality from screens and menus" : "", "Remove fine-grained animal control incident permissions" : "", "Remove holds after" : "", "Remove move menu and the movements tab from animal and person screens" : "", "Remove personally identifiable data" : "", "Remove previously published files before uploading" : "", "Remove retailer functionality from the movement screens and menus" : "", "Remove short shelter code box from the animal details screen" : "", "Remove the FIV/L test fields from animal health details" : "", "Remove the Litter ID field from animal details" : "", "Remove the Rabies Tag field from animal health details" : "", "Remove the adoption coordinator field from animal entry details" : "", "Remove the adoption fee field from animal details" : "", "Remove the animal control functionality from menus and screens" : "", "Remove the bonded with fields from animal entry details" : "", "Remove the city/state fields from person details" : "", "Remove the coat type field from animal details" : "", "Remove the declawed box from animal health details" : "", "Remove the document repository functionality from menus" : "", "Remove the good with fields from animal notes" : "", "Remove the heartworm test fields from animal health details" : "", "Remove the insurance number field from the movement screens" : "", "Remove the location unit field from animal details" : "", "Remove the microchip fields from animal identification details" : "", "Remove the neutered fields from animal health details" : "", "Remove the online form functionality from menus" : "", "Remove the picked up fields from animal entry details" : "", "Remove the rota functionality from menus and screens" : "", "Remove the size field from animal details" : "", "Remove the stock control functionality from menus and screens" : "", "Remove the tattoo fields from animal identification details" : "", "Remove the transport functionality from menus and screens" : "", "Remove the trap loan functionality from menus and screens" : "", "Remove the weight field from animal details" : "", "Removed" : "", "Rename" : "", "Renew License" : "", "Renew licence" : "", "Renew license" : "", "Report" : "Aruanne", "Report Title" : "", "Report a new incident" : "", "Reports" : "Aruanded", "Request signature by email" : "", "Requested" : "Soovitud", "Require followup" : "", "Required" : "Nõutav", "Required date must be a valid date" : "", "Reschedule" : "", "Reservation" : "Broneeritud", "Reservation Book" : "Broneeringute raamat", "Reservation Cancelled" : "", "Reservation Date" : "Broneeringute raamat", "Reservation For" : "Broneeringute raamat", "Reservation Status" : "Broneeringute raamat", "Reservation 
Statuses" : "Broneeringute raamat", "Reservation book" : "Broneeringute raamat", "Reservation date cannot be after cancellation date." : "", "Reservation successfully created." : "", "Reservations must have a valid reservation date." : "", "Reserve" : "Broneeritud", "Reserve an animal" : "", "Reserved" : "Broneeritud", "Reset" : "", "Reset Password" : "", "Respond" : "", "Responded" : "", "Responded Between" : "", "Responded Date/Time" : "", "Result" : "", "Results" : "", "Results for '{0}'." : "", "Retailer" : "Edasimüüja", "Retailer Animals" : "Mitte-varjupaiga loom.", "Retailer Book" : "Vahendajate raamat", "Retailer book" : "Vahendajate raamat", "Retailer movement successfully created." : "", "Retailer movements must have a valid movement date." : "", "Retriever" : "Retriever", "Return" : "Tagastatud", "Return Category" : "", "Return Date" : "Tagastamise kuupäev", "Return a transferred animal" : "", "Return an animal from adoption" : "", "Return an animal from another movement" : "", "Return an animal from transfer" : "", "Return date cannot be before the movement date." : "", "Return this movement and bring the animal back to the shelter" : "", "Returned" : "Tagastatud", "Returned By" : "Tagasitooja", "Returned To Owner" : "Tagastatud omanikule", "Returned from" : "", "Returned to" : "", "Returned to Owner {0}" : "Tagastatud omanikule", "Returning" : "", "Returns {0}" : "", "Reupload animal images every time" : "", "Rex" : "Rex", "Rhea" : "Nandu", "Rhinelander" : "Rhinelander", "Rhodesian Ridgeback" : "Rhodesian Ridgeback", "Ringneck/Psittacula" : "Ringneck/Psittacula", "Role is in use and cannot be deleted." : "", "Roles" : "", "Roles need a name." : "", "Rosella" : "Rosella", "Rostered day off" : "", "Rota" : "", "Rota Types" : "", "Rota cloned successfully." : "", "Rotate image 90 degrees anticlockwis" : "", "Rotate image 90 degrees clockwise" : "", "Rottweiler" : "Rottweiler", "Rough" : "Karm", "Rows" : "", "Ruddy" : "", "Russian Blue" : "Russian Blue (Vene sinine)", "S (Stray Cat)" : "T (Hulkuv kass)", "S = first letter of animal species" : "", "SM Account" : "", "SMS" : "", "SQL" : "SQL", "SQL Interface" : "", "SQL dump" : "", "SQL dump (ASM2 HSQLDB Format)" : "", "SQL editor: Press F11 to go full screen and press CTRL+SPACE to autocomplete table and column names" : "", "SQL interface" : "", "SQL is syntactically correct." : "", "SS = first and second letter of animal species" : "", "Sa" : "", "Saddlebred" : "Saddlebred", "Saint Bernard St. Bernard" : "Saint Bernard St. Bernard (Bernhardiin)", "Sales Tax" : "", "Saluki" : "Saluki", "Samoyed" : "Samoyed", "Sat" : "", "Satin" : "Satin", "Saturday" : "", "Save" : "", "Save and leave" : "", "Save this incident" : "", "Save this person" : "", "Save this record" : "Salvesta kirje", "Save this waiting list entry" : "", "Saving..." : "", "Scale published animal images to" : "", "Scheduled" : "", "Schipperke" : "Schipperke", "Schnauzer" : "Schnauzer", "Scottish Deerhound" : "Scottish Deerhound", "Scottish Fold" : "Scottish Fold", "Scottish Terrier Scottie" : "Scottish Terrier Scottie", "Script" : "", "Seal" : "", "Sealyham Terrier" : "Sealyham Terrier", "Search" : "Otsi", "Search Results for '{0}'" : "", "Search returned {0} results." : "", "Search sort order" : "", "Searchable" : "", "Second offence" : "", "Select" : "Vali", "Select a person" : "", "Select a person to attach this form to." : "", "Select a person to merge into this record. The selected person will be removed, and their movements, diary notes, log entries, etc. 
will be reattached to this record." : "", "Select all" : "", "Select an animal" : "Vali loom", "Select an animal to attach this form to." : "", "Select an animal to merge into this record. The selected animal will be removed, and their movements, diary notes, log entries, etc. will be reattached to this record." : "", "Select animal to merge" : "Vali loom", "Select animals" : "Vali loom", "Select date for diary task" : "", "Select person to merge" : "", "Select recommended" : "", "Selected On-Shelter Animals" : "Mitte-varjupaiga loom.", "Selkirk Rex" : "Selkirk Rex", "Send" : "Saada", "Send Emails" : "", "Send a weekly email to fosterers with medical information about their animals" : "Muu info looma kohta", "Send confirmation email to form submitter" : "", "Send emails" : "", "Send mass emails and perform mail merges" : "", "Send via email" : "", "Sending {0} emails is considered abusive and will damage the reputation of the email server." : "", "Sending..." : "Sorteerimine...", "Senior" : "Seenior", "Sent to mobile signing pad." : "", "Sep" : "september", "Separate waiting list rank by species" : "", "September" : "september", "Server clock adjustment" : "", "Set publishing options" : "", "Set this to 0 to never automatically remove." : "", "Set to 0 to never update urgencies." : "", "Set wether or not this user account can log in to the user interface." : "", "Setter" : "Setter", "Setting a location filter will prevent this user seeing animals who are not in these locations on shelterview, find animal and search." : "", "Settings" : "", "Settings, Lookup data" : "", "Settings, Options" : "", "Settings, Reports" : "", "Settings, System user accounts" : "", "Sex" : "Sugu", "Sex and Species" : "Vali liik", "Sexes" : "", "Shar Pei" : "Shar Pei", "Share" : "", "Shared weblink" : "Katkine omaniku link", "Shares" : "", "Sheep" : "Lammas", "Sheep Dog" : "Sheep Dog", "Shelter" : "", "Shelter Animal" : "Mitte-varjupaiga loom.", "Shelter Animals" : "Mitte-varjupaiga loom.", "Shelter Details" : "Varjupaiga üksikasjad", "Shelter animal {0} '{1}'" : "", "Shelter animals" : "Mitte-varjupaiga loom.", "Shelter code cannot be blank" : "", "Shelter code {0} has already been allocated to another animal." 
: "", "Shelter stats (all time)" : "", "Shelter stats (this month)" : "", "Shelter stats (this week)" : "", "Shelter stats (this year)" : "", "Shelter stats (today)" : "", "Shelter view" : "", "Shepherd" : "Shepherd", "Shetland Sheepdog Sheltie" : "Shetland Sheepdog Sheltie", "Shiba Inu" : "Shiba Inu", "Shift" : "", "Shih Tzu" : "Shih Tzu", "Short" : "Lühike", "Show GDPR Contact Opt-In field on person screens" : "", "Show PDF files inline instead of sending them as attachments" : "", "Show a cost field on medical/test/vaccination screens" : "", "Show a minimap of the address on person screens" : "", "Show a separate paid date field with costs" : "", "Show alerts on the home page" : "", "Show animal thumbnails in movement and medical books" : "", "Show animals adopted" : "", "Show codes on the shelter view screen" : "", "Show complete comments in table views" : "", "Show empty locations" : "", "Show on new record screens" : "", "Show quick links on all pages" : "", "Show quick links on the home page" : "", "Show report menu items in collapsed categories" : "", "Show short shelter codes on screens" : "", "Show the adoption fee field" : "Failis ei ole loovutusi.", "Show the altered fields" : "", "Show the breed fields" : "", "Show the brought in by field" : "", "Show the color field" : "", "Show the date brought in field" : "", "Show the entry category field" : "", "Show the full diary (instead of just my notes) on the home page" : "", "Show the hold fields" : "", "Show the internal location field" : "", "Show the litter ID field" : "", "Show the location unit field" : "", "Show the microchip fields" : "", "Show the original owner field" : "", "Show the size field" : "", "Show the tattoo fields" : "", "Show the time brought in field" : "", "Show the transfer in field" : "Näita ainult üleviimisi:", "Show the weight field" : "", "Show timeline on the home page" : "", "Show tips on the home page" : "", "Show transactions from" : "", "Show weight as lb rather than kg" : "", "Showing {0} timeline events." : "", "Siamese" : "Siamese (Siiam)", "Siberian" : "Siberian", "Siberian Husky" : "Siberian Husky", "Sick leave" : "", "Sick/Injured" : "Haige/vigastatud", "Sick/injured animal" : "", "Sign" : "", "Sign document" : "", "Sign on screen" : "", "Signature" : "", "Signed" : "", "Signing" : "", "Signing Pad" : "", "Signup" : "", "Silky Terrier" : "Silky Terrier", "Silver" : "Silver", "Silver Fox" : "Silver Fox", "Silver Marten" : "Silver Marten", "Similar Animal" : "", "Similar Person" : "", "Simple" : "Lihtne", "Singapura" : "Singapura", "Single Treatment" : "Ühekordne ravi", "Site" : "Hammustus/hammustama", "Sites" : "Suurus", "Size" : "Suurus", "Sizes" : "Suurus", "Skunk" : "Skunk", "Skye Terrier" : "Skye Terrier", "Sloughi" : "Sloughi", "Small" : "Väike", "SmartTag PETID" : "", "Smooth Fox Terrier" : "Smooth Fox Terrier", "Snake" : "Madu", "Snowshoe" : "Snowshoe", "Social" : "", "Softbill (Other)" : "Softbill (Other)", "Sold" : "", "Somali" : "Somali", "Some batch processes may take a few minutes to run and could prevent other users being able to use the system for a short time." : "", "Some browsers allow shortcut keys, press SHIFT+ALT+A in Chrome or Firefox to jump to the animal adoption screen." 
: "", "Some info text" : "", "Sorrel" : "", "Sorrel Tortoiseshell" : "", "Sorry, this document has already been signed" : "", "South Russian Ovcharka" : "South Russian Ovcharka", "Spaniel" : "Spaniel", "Special Needs" : "", "Species" : "Liik", "Species A-Z" : "", "Species Z-A" : "", "Species to use when publishing to third party services and adoption sites" : "", "Specifying a reschedule date will make copies of the selected vaccinations and mark them to be given on the reschedule date. Example: If this vaccination needs to be given every year, set the reschedule date to be 1 year from today." : "", "Sphynx (hairless cat)" : "Sphynx (hairless cat) (Sfinks)", "Spitz" : "Spitz", "Split baby/adult age at" : "", "Split species pages with a baby/adult prefix" : "", "Sponsorship donations" : "", "Staff" : "Personal", "Staff Rota" : "", "Staff record" : "", "Staff rota" : "", "Staffordshire Bull Terrier" : "Staffordshire Bull Terrier", "Standard" : "", "Standardbred" : "Standardbred", "Start Date" : "Alguskuupäev", "Start Of Day" : "", "Start Time" : "", "Start at" : "Alguskuupäev", "Start date" : "Alguskuupäev", "Start date must be a valid date" : "", "Start of year" : "", "Started" : "", "Starts" : "Olek", "State" : "Olek", "State contains" : "", "Stationary costs" : "", "Stats" : "Olek", "Stats period" : "", "Stats show running figures for the selected period of animals entering and leaving the shelter on the home page." : "", "Status" : "Olek", "Status and Species" : "Vali liik", "Stay" : "Hulkuv", "Stock" : "", "Stock Control" : "", "Stock Levels" : "", "Stock Locations" : "", "Stock Take" : "", "Stock Usage Type" : "", "Stock level must have a name" : "", "Stock level must have a unit" : "", "Stock needs a name and unit." : "", "Stocktake" : "", "Stolen" : "Varastatud", "Stolen {0}" : "", "Stop" : "", "Stop Publishing" : "", "Stores" : "", "Stray" : "Hulkuv", "Su" : "", "SubTotal" : "Vahesumma", "Subject" : "Teema", "Submission received: {0}" : "", "Success" : "", "Successfully attached to {0}" : "", "Sugar Glider" : "Liugurkuskus", "Sun" : "", "Sunday" : "", "Super user" : "", "Superuser" : "", "Surname" : "Perekonnanimi", "Surrender" : "Praegune veterinaar", "Surrender Pickup" : "", "Suspect" : "", "Suspect 1" : "", "Suspect 2" : "", "Suspect 3" : "", "Suspect/Animal" : "", "Swan" : "Luik", "Swedish Vallhund" : "Swedish Vallhund", "Syntax check this SQL" : "", "System" : "Süsteem", "System Admin" : "", "System Options" : "", "System user accounts" : "", "T = first letter of animal type" : "", "TNR" : "", "TNR - Trap/Neuter/Release" : "", "TT = first and second letter of animal type" : "", "Tabby" : "Vöödiline", "Tabby and White" : "Vöödiline ja valge", "Take another payment" : "", "Taken By" : "", "Tan" : "Kollakaspruun", "Tan and Black" : "Kollakaspruun mustaga", "Tan and White" : "Kollakaspruun ja must", "Task complete." 
: "Skanneerimine valmis", "Task items are executed in order of index, lowest to highest" : "", "Tattoo" : "", "Tattoo Date" : "", "Tattoo Number" : "", "Tax" : "", "Tax Amount" : "", "Tax Rate %" : "", "Telephone" : "Telefon", "Telephone Bills" : "", "Template" : "", "Template Name" : "", "Template names can include a path portion with /, eg: Vets/Rabies Certificate" : "", "Tennessee Walker" : "Tennessee Walker", "Terrapin" : "Lamekilpkonn", "Terrier" : "Terrier", "Test" : "Tekst", "Test Animal" : "Kaotatud loom", "Test Book" : "", "Test Performed" : "", "Test Results" : "", "Test Types" : "Kulu tüübid", "Test book" : "", "Test marked as performed for {0} - {1}" : "", "Tests" : "Tekst", "Tests need an animal and at least a required date." : "", "Text" : "Tekst", "Text Encoding" : "", "Th" : "", "Thai Ridgeback" : "Thai Ridgeback", "Thank you for choosing Animal Shelter Manager for your shelter!" : "", "Thank you, the document is now signed." : "", "That animal is already linked to the incident" : "", "The CSV file should be created by PayPal's \"All Activity\" report." : "", "The SmartTag PETID number" : "", "The SmartTag type" : "", "The URL is the address of a web resource, eg: www.youtube.com/watch?v=xxxxxx" : "", "The animal name" : "", "The animal record to merge must be different from the original." : "", "The animal sex" : "", "The base color of this animal" : "", "The coat type of this animal" : "", "The confirmation email message to send to the form submitter. Leave blank to send a copy of the completed form." : "", "The database will be inaccessible to all users while the export is in progress." : "", "The date reported to the shelter" : "", "The date the animal died" : "Looma surma kuupäev", "The date the animal was FIV/L tested" : "", "The date the animal was adopted" : "", "The date the animal was altered" : "", "The date the animal was born" : "", "The date the animal was brought into the shelter" : "Looma varjupaika toomise kuupäev", "The date the animal was heartworm tested" : "", "The date the animal was microchipped" : "", "The date the animal was reclaimed" : "", "The date the animal was tattooed" : "", "The date the foster animal will be returned if known" : "", "The date the foster is effective from" : "", "The date the litter entered the shelter" : "", "The date the owner last contacted the shelter" : "", "The date the payment was received" : "", "The date the reservation is effective from" : "", "The date the retailer movement is effective from" : "", "The date the transfer is effective from" : "", "The date the trial adoption is over" : "", "The date the vaccination is required/due to be administered" : "Looma varjupaika tagasi toomise kuupäev", "The date the vaccination was administered" : "Broneeringu tühistamise kuupäev", "The date this animal was found" : "", "The date this animal was lost" : "", "The date this animal was put on the waiting list" : "", "The date this animal was removed from the waiting list" : "", "The date this animal was reserved" : "", "The date this animal was returned to its owner" : "Looma omanikult tagastamise kuupäev", "The date this person was homechecked." : "", "The default username is 'user' with the password 'letmein'" : "", "The entry reason for this animal" : "", "The litter this animal belongs to" : "", "The locale determines the language ASM will use when displaying text, dates and currencies." : "", "The location where the animal was picked up" : "", "The microchip number" : "", "The movement number '{0}' is not unique." 
: "", "The number of stock records to create" : "", "The period in days before waiting list urgency is increased" : "", "The person record to merge must be different from the original." : "", "The primary breed of this animal" : "", "The reason the owner wants to part with the animal" : "", "The reason this animal was removed from the waiting list" : "", "The remaining units in the container" : "", "The result of the FIV test" : "", "The result of the FLV test" : "", "The result of the heartworm test" : "", "The retail/resale price per unit" : "", "The secondary breed of this animal" : "", "The selected file is not an image." : "", "The shelter category for this animal" : "", "The shelter reference number" : "", "The sheltermanager.com admin account password cannot be changed here, please visit {0}" : "", "The size of this animal" : "", "The species of this animal" : "", "The tattoo number" : "", "The type of unit in the container, eg: tablet, vial, etc." : "", "The veterinary license number." : "", "The wholesale/trade price the container was bought for" : "", "There is not enough information in the form to attach to a shelter animal record (need an animal name)." : "", "There is not enough information in the form to create a found animal record (need a description and area found)." : "", "There is not enough information in the form to create a lost animal record (need a description and area lost)." : "", "There is not enough information in the form to create a person record (need a surname)." : "", "There is not enough information in the form to create a transport record (need animalname)." : "", "There is not enough information in the form to create a transport record (need pickupdate and dropoffdate)." : "", "There is not enough information in the form to create a waiting list record (need a description)." : "", "There is not enough information in the form to create an incident record (need call notes and dispatch address)." : "", "These are the HTML headers and footers used when displaying online forms." : "", "These are the HTML headers and footers used when generating reports." : "", "These are the default values for these fields when creating new records." : "", "These batch processes are run each night by the system and should not need to be run manually." : "", "These fields allow you to deduct stock for the test(s) given. This single deduction should cover the selected tests being performed." : "", "These fields allow you to deduct stock for the treatment(s) given. This single deduction should cover the selected treatments being administered." : "", "These fields allow you to deduct stock for the vaccination(s) given. This single deduction should cover the selected vaccinations being administered." : "", "These fields determine which columns are shown on the find animal and find person screens." : "", "These numbers are for shelters who have agreements with insurance companies and are given blocks of policy numbers to allocate." : "", "These options change the behaviour of the search box at the top of the page." : "", "These values are required for correct operation of the system. ONLY change them if you are translating to another language." : "", "Third offence" : "", "This Month" : "", "This Week" : "", "This Year" : "", "This animal already has an active reservation." : "", "This animal has a SmartTag PETID" : "", "This animal has a tattoo" : "", "This animal has active reservations, they will be cancelled." 
: "", "This animal has an adoption fee of {0}" : "", "This animal has been FIV/L tested" : "", "This animal has been altered" : "", "This animal has been declawed" : "", "This animal has been heartworm tested" : "", "This animal has movements and cannot be removed." : "", "This animal has not been altered." : "", "This animal has not been microchipped." : "Kõikidel loomadel on varjupaigas mikrokiip.", "This animal has special needs" : "", "This animal has the same name as another animal recently added to the system." : "", "This animal is a crossbreed" : "", "This animal is bonded with {0}" : "", "This animal is bonded with {0}. Adoption movement records will be created for all bonded animals." : "", "This animal is currently at a retailer, it will be automatically returned first." : "", "This animal is currently fostered, it will be automatically returned first." : "", "This animal is currently held and cannot be adopted." : "", "This animal is currently quarantined and should not leave the shelter." : "", "This animal is marked not for adoption." : "", "This animal is microchipped" : "", "This animal is not on the shelter." : "", "This animal is part of a cruelty case and should not leave the shelter." : "", "This animal should be held in case it is reclaimed" : "", "This animal should not be shown in figures and is not in the custody of the shelter" : "", "This animal was dead on arrival to the shelter" : "", "This animal was euthanized" : "", "This animal was picked up" : "", "This animal was transferred from another shelter" : "", "This code has already been used." : "", "This database is locked and in read-only mode. You cannot add, change or delete records." : "", "This database is locked." : "", "This date of birth is an estimate" : "", "This expense account is the source for costs of this type" : "", "This income account is the source for payments received of this type" : "", "This item is referred to in the database ({0}) and cannot be deleted until it is no longer in use." : "", "This many years after creation of a person record, the name, address and telephone data will be anonymized." : "", "This month" : "", "This movement cannot be from a retailer when the animal has no prior retailer movements." : "", "This person has an animal control incident against them" : "", "This person has an animal control incident against them." : "", "This person has been banned from adopting animals" : "", "This person has been banned from adopting animals." : "", "This person has been under investigation" : "", "This person has been under investigation." : "", "This person has movements and cannot be removed." : "", "This person has not passed a homecheck" : "", "This person has not passed a homecheck." : "", "This person has payments and cannot be removed." : "", "This person has previously surrendered an animal." : "", "This person is linked to a waiting list record and cannot be removed." : "", "This person is linked to an animal and cannot be removed." : "", "This person is linked to an investigation and cannot be removed." : "", "This person is linked to animal control and cannot be removed." : "", "This person is linked to animal licenses and cannot be removed." : "", "This person is linked to animal transportation and cannot be removed." : "", "This person is linked to citations and cannot be removed." : "", "This person is linked to found animals and cannot be removed." : "", "This person is linked to lost animals and cannot be removed." 
: "", "This person is linked to trap loans and cannot be removed." : "", "This person is not flagged as a fosterer and cannot foster animals." : "", "This person is not flagged as a retailer and cannot handle retailer movements." : "", "This person is very similar to another person on file, carry on creating this record?" : "", "This person lives in the same area as the person who brought the animal to the shelter." : "", "This record has been changed by another user, please reload." : "", "This report cannot be sent by email as it requires criteria to run." : "", "This screen allows you to add extra documents to your database, for staff training, reference materials, etc." : "", "This screen allows you to add extra images to your database, for use in reports and documents." : "", "This type of movement requires a date." : "", "This type of movement requires a person." : "", "This week" : "", "This will permanently remove the selected records, are you sure?" : "", "This will permanently remove the selected roles, are you sure?" : "", "This will permanently remove the selected user accounts. Are you sure?" : "", "This will permanently remove this account and ALL TRANSACTIONS HELD AGAINST IT. This action is irreversible, are you sure you want to do this?" : "", "This will permanently remove this additional field and ALL DATA CURRENTLY HELD AGAINST IT. This action is irreversible, are you sure you want to do this?" : "", "This will permanently remove this animal, are you sure?" : "", "This will permanently remove this incident, are you sure?" : "", "This will permanently remove this person, are you sure?" : "", "This will permanently remove this record, are you sure?" : "", "This will permanently remove this waiting list entry, are you sure?" : "", "This will remove ALL rota entries for the week beginning {0}. This action is irreversible, are you sure?" : "", "This year" : "", "Thoroughbred" : "Thoroughbred", "Thu" : "", "Thumbnail size" : "", "Thursday" : "", "Tibetan Mastiff" : "Tibetan Mastiff", "Tibetan Spaniel" : "Tibetan Spaniel", "Tibetan Terrier" : "Tibetan Terrier", "Tiger" : "Tiger", "Time" : "", "Time Brought In" : "Toomise kuupäev", "Time On List" : "", "Time On Shelter" : "", "Time on list" : "", "Time on shelter" : "", "Timeline" : "", "Timeline ({0})" : "", "Times should be in HH:MM format, eg: 09:00, 16:30" : "", "Title" : "Pealkiri", "Title First Last" : "", "Title Initials Last" : "", "To" : "", "To Adoption" : "", "To Fostering" : "Kasuperesse", "To Other" : "Muusse", "To Retailer" : "", "To add people to the rota, create new person records with the staff or volunteer flag." : "", "To continue using ASM, please renew {0}" : "", "To week beginning" : "", "Today" : "", "Tonkinese" : "Tonkinese", "Too Many Animals" : "Klooni loomi", "Tooltip" : "Vihje", "Top Margin" : "", "Tortie" : "Kilpkonn", "Tortie and White" : "", "Tortoise" : "Kilpkonn", "Tosa Inu" : "Tosa Inu", "Total" : "", "Total number of units in the container" : "", "Total payments" : "", "Toucan" : "Tuukan", "Toy Fox Terrier" : "Toy Fox Terrier", "Training" : "", "Transactions" : "", "Transactions need a date and description." : "", "Transfer" : "Ülekanne", "Transfer In" : "Ületoodud", "Transfer To" : "Ületoodud", "Transfer an animal" : "", "Transfer from Municipal Shelter" : "", "Transfer from Other Shelter" : "", "Transfer successfully created." : "", "Transfer?" 
: "Ülekanne", "Transferred" : "Edastatud", "Transferred From" : "Ületoodud", "Transferred In" : "Ületoodud", "Transferred In {0}" : "Ületoodud", "Transferred Out" : "Üleviidud", "Transferred Out {0}" : "Üleviidud", "Transfers must have a valid transfer date." : "", "Transport" : "", "Transport Book" : "", "Transport Types" : "Kulu tüübid", "Transport book" : "", "Transport requires an animal" : "", "Transports must have valid pickup and dropoff dates and times." : "", "Trap Loans" : "", "Trap Number" : "", "Trap Types" : "", "Trap loan" : "", "Trap loans" : "", "Treat animals at retailers as part of the shelter inventory" : "", "Treat foster animals as part of the shelter inventory" : "", "Treat trial adoptions as part of the shelter inventory" : "", "Treatment" : "ravid", "Treatment Given" : "", "Treatment marked as given for {0} - {1}" : "", "Treatment name cannot be blank" : "", "Treatments" : "ravid", "Treeing Walker Coonhound" : "Treeing Walker Coonhound", "Trial Adoption" : "", "Trial adoption" : "", "Trial adoption book" : "", "Trial ends on" : "", "Tricolour" : "Tricolour", "Trigger Batch Processes" : "", "Tu" : "", "Tue" : "", "Tuesday" : "", "Tumblr" : "", "Turkey" : "Türgi", "Turkish Angora" : "Turkish Angora", "Turkish Van" : "Turkish Van", "Turtle" : "Kilpkonn", "Twitter" : "", "Type" : "Tüüp", "Type of animal links to show" : "", "U (Unwanted Cat)" : "S (Soovimatu kass)", "UK Giftaid" : "", "URL" : "", "UUUUUUUUUU or UUUU = unique number" : "", "Unable to Afford" : "Ei jõua ülal pidada", "Unable to Cope" : "Ei saa hakkama", "Unaltered" : "", "Unaltered Adopted Animals" : "", "Unaltered Dog - 1 year" : "", "Unaltered Dog - 3 year" : "", "Unavailable" : "", "Under {0} weeks old" : "", "Unit" : "", "Unit Price" : "", "Unit within the location, eg: pen or cage number" : "", "Units" : "", "Unknown" : "Tundmatu", "Unknown microchip brand" : "", "Unpaid Fines" : "", "Unreserved" : "Broneerimata", "Unsaved Changes" : "Salvestamata muudatused", "Unspecified" : "Määramata", "Unsuitable Accomodation" : "Ebasobivad tingimused", "Up for adoption" : "Loom ei ole loovutamiseks", "Upcoming medical items" : "", "Update" : "", "Update publishing options" : "", "Update system options" : "", "Update the daily boarding cost for this animal" : "", "Updated database to version {0}" : "", "Updated." : "", "Updating..." : "", "Upload" : "", "Upload Document" : "", "Upload ODT" : "", "Upload Photo" : "", "Upload a new OpenOffice template" : "", "Upload all available images for animals" : "", "Upload an SQL script" : "", "Upload splash.jpg and logo.jpg to override the login screen image and logo at the top left of ASM." : "", "Uploading..." : "", "Urgencies" : "", "Urgency" : "Pakiline vajadus", "Urgent" : "Kiireloomuline", "Usage Date" : "", "Usage Type" : "", "Usage explains why this stock record was created or adjusted. Usage records will only be created if the balance changes." : "", "Use Automatic Insurance Numbers" : "", "Use HTML5 client side image scaling where available to speed up image uploads" : "", "Use SQL Interface" : "", "Use a single breed field" : "", "Use animal comments" : "", "Use fancy tooltips" : "", "Use notes from preferred photo" : "", "Use the icon in the lower right of notes fields to view them in a separate window." : "", "User Accounts" : "", "User Roles" : "", "User accounts that will only ever call the Service API should set this to No." 
: "", "User roles" : "", "Username" : "", "Username '{0}' already exists" : "", "Users" : "", "Users need a username, password and at least one role or the superuser flag setting." : "", "Vacation" : "Asukoht", "Vaccinate" : "", "Vaccinate Animal" : "", "Vaccination" : "Vaktsineerimine", "Vaccination Book" : "Vaktsineerimisraamat", "Vaccination Given" : "Vaktsiineerimise tüübid", "Vaccination Types" : "Vaktsiineerimise tüübid", "Vaccination book" : "Vaktsineerimisraamat", "Vaccination marked as given for {0} - {1}" : "", "Vaccinations" : "Vaktsineerimine", "Vaccinations need an animal and at least a required date." : "", "Vaccinations require an animal" : "", "Vaccinations: {0}, Tests: {1}, Medical Treatments: {2}, Transport: {3}, Costs: {4}, Total Costs: {5} Total Payments: {6}, Balance: {7}" : "", "Valid tokens for the subject and text" : "", "Value" : "", "Various" : "Mitmesugune", "Vertical Pitch" : "", "Very Large" : "Väga suur", "Vet" : "Veterinaar", "Vet Visit" : "", "Victim" : "", "Victim Name" : "", "Video Link" : "", "Vietnamese Pot Bellied" : "Vietnamese Pot Bellied", "View" : "", "View Accounts" : "", "View Animals" : "Vaata loomi", "View Audit Trail" : "", "View Citations" : "", "View Clinic Appointment" : "", "View Cost" : "Uus maksumus", "View Diary" : "", "View Diets" : "", "View Document" : "", "View Document Repository" : "", "View Found Animal" : "Leia leitud loom", "View Incidents" : "", "View Incoming Forms" : "", "View Investigations" : "", "View Licenses" : "", "View Litter" : "", "View Log" : "", "View Lost Animal" : "Leia kadunud loom", "View Manual" : "", "View Media" : "", "View Medical Records" : "", "View Movement" : "", "View PDF" : "", "View Payments" : "", "View Person" : "", "View Person Links" : "", "View Report" : "", "View Roles" : "", "View Rota" : "", "View Shelter Animals" : "Mitte-varjupaiga loom.", "View Staff Person Records" : "", "View Stock" : "", "View Tests" : "", "View Training Videos" : "", "View Transport" : "", "View Trap Loans" : "", "View Vaccinations" : "Uus vaktsineerimine", "View Volunteer Person Records" : "", "View Vouchers" : "Uus vautšer", "View Waiting List" : "Vaata ootenimekirja", "View animals matching publishing options" : "", "View littermates" : "", "View matching records" : "", "View media" : "", "View publishing logs" : "", "Visual Theme" : "", "Vizsla" : "Vizsla", "Volunteer" : "Vabatahtlik", "Voucher Types" : "", "Vouchers" : "Vautšerid", "Vouchers need an issue and expiry date." : "", "WARNING: This animal has not been microchipped" : "HOIATUS: selle omaniku kodu ei ole kontrollitud", "WARNING: This animal is over 6 months old and has not been neutered/spayed" : "", "Waiting" : "Hammustamine", "Waiting List" : "Ootenimekiri", "Waiting List - Additional" : "", "Waiting List - Details" : "", "Waiting List - Removal" : "", "Waiting List Contact" : "Ootenimekirja kontakt", "Waiting List Donation" : "Ootenimekirja annetus", "Waiting List {0}" : "Ootenimekiri: {0}", "Waiting List: {0}" : "Ootenimekiri: {0}", "Waiting Room" : "Ootenimekiri", "Waiting for documents..." : "", "Waiting list donations" : "Ootenimekirja annetus", "Waiting list entries matching '{0}'." : "", "Waiting list entries must have a contact" : "", "Waiting list entry for {0} ({1})" : "", "Waiting list entry successfully added." 
: "", "Waiting list urgency update period in days" : "", "Warmblood" : "Warmblood", "Warn if the name of the new animal is similar to one entered recently" : "", "Warn when adopting an animal who has not been microchipped" : "Hoiata mind kui hakkan loovutama looma inimesele, kes on oma loomast loobunud", "Warn when adopting an unaltered animal" : "Hoiata kui hakkan loovutama loomapidamiskeeluga omanikule", "Warn when adopting to a person who has been banned from adopting animals" : "", "Warn when adopting to a person who has not been homechecked" : "", "Warn when adopting to a person who has previously brought an animal to the shelter" : "", "Warn when adopting to a person who lives in the same area as the original owner" : "", "Warn when creating multiple reservations on the same animal" : "", "Warnings" : "Hoiatused", "Wasted" : "", "Water Bills" : "", "We" : "", "Wed" : "", "Wednesday" : "", "Week" : "", "Week beginning {0}" : "", "Weekly" : "Iganädalane", "Weight" : "Kaal", "Weimaraner" : "Weimaraner", "Welcome!" : "", "Welsh Corgi" : "Welsh Corgi", "Welsh Springer Spaniel" : "Welsh Springer Spaniel", "Welsh Terrier" : "Welsh Terrier", "West Highland White Terrier Westie" : "West Highland White Terrier Westie", "Wheaten Terrier" : "Wheaten Terrier", "When" : "", "When ASM should stop showing this message" : "", "When I change the location of an animal, make a note of it in the log with this type" : "", "When I change the weight of an animal, make a note of it in the log with this type" : "", "When I generate a document, make a note of it in the log with this type" : "", "When I mark an animal held, make a note of it in the log with this type" : "", "When I set a new GDPR Opt-In contact option, make a note of it in the log with this type" : "", "When a message is created, email it to each matching user" : "", "When creating payments from the Move menu screens, mark them due instead of received" : "", "When displaying calendars, the first day of the week is" : "", "When displaying person names, use the format" : "", "When entering dates, hold down CTRL and use the cursor keys to move around the calendar. Press t to go to today." : "", "When entering vaccinations, default the last batch number and manufacturer for that type" : "", "When matching lost animals, include shelter animals" : "", "When publishing to third party services, add this extra text to the bottom of all animal descriptions" : "", "When receiving multiple payments, allow the due and received dates to be set" : "", "When receiving payments, allow a quantity and unit price to be set" : "", "When receiving payments, allow recording of sales tax with a default rate of" : "", "When receiving payments, allow the deposit account to be overridden" : "", "When you use Move > Adopt an animal, ASM will automatically return any open foster or retailer movement before creating the adoption." : "", "When you use Move > Foster an animal, ASM will automatically return any open foster movement before moving the animal to its new home." 
: "", "Where this animal is located within the shelter" : "", "Whippet" : "Whippet", "White" : "Valge", "White German Shepherd" : "White German Shepherd (Valge saksa lambakoer)", "White and Black" : "Valge ja must", "White and Brindle" : "Valge ja hallikaspruun", "White and Brown" : "Valge ja pruun", "White and Grey" : "Valge ja hall", "White and Liver" : "Valge ja maksavärvi", "White and Tabby" : "Valge ja vöödiline", "White and Tan" : "Valge ja kollakaspruun", "White and Torti" : "Valge ja kilpkonnavärvi", "Will this owner give a donation?" : "", "Wire-haired Pointing Griffon" : "Wire-haired Pointing Griffon", "Wirehaired Terrier" : "Wirehaired Terrier", "With Vet" : "", "With overnight batch" : "", "Withdrawal" : "", "Wk" : "", "Work" : "", "Work Phone" : "", "Work Types" : "", "XXX or XX = number unique for this year" : "", "Xoloitzcuintle/Mexican Hairless" : "Xoloitzcuintle/Mexican Hairless", "YY or YYYY = current year" : "", "Yellow Labrador Retriever" : "Yellow Labrador Retriever", "Yellow and Grey" : "Kollane ja hall", "Yes" : "Jah", "Yes/No" : "Jah/ei", "Yes/No/Unknown" : "", "Yorkshire Terrier Yorkie" : "Yorkshire Terrier Yorkie", "You can bookmark search results, animals, people and most data entry screens." : "", "You can drag and drop animals in shelter view to change their locations." : "", "You can middle click a link to open it in a new browser tab (push the wheel on most modern mice)." : "", "You can override the search result sort by adding one of the following to the end of your search - sort:az, sort:za, sort:mr, sort:lr" : "", "You can prefix your term in the search box with a: to search only animals, p: to search only people, wl: to search waiting list entries, la: to search lost animals and fa: to search found animals." : "", "You can set a default amount for different payment types in the Settings- Lookup Data screen. Very handy when creating adoptions." : "", "You can sort tables by clicking on the column headings." : "", "You can upload images called logo.jpg and splash.jpg to the Settings- Reports-Extra Images screen to override the login splash screen and logo in the upper left corner of the application." : "", "You can use incoming forms to create new records or attach them to existing records." : "", "You can't have a return without a movement." : "", "You didn't specify any search criteria, so an on-shelter search was assumed." : "", "You have unsaved changes, are you sure you want to leave this page?" : "", "You must supply a code." : "", "Young Adult" : "Nooruk", "Your CSV file should have a header row with field names ASM recognises." : "", "Your sheltermanager.com account is due to expire on {0}, please renew {1}" : "", "Zipcode" : "", "Zipcode contains" : "", "[None]" : "[Puudub]", "after connecting, chdir to" : "", "and" : "ja", "are sent to" : "", "at" : "", "cm" : "", "days" : "päev(a)", "estimate" : "Hinnanguline", "filters: a:animal, p:person, wl:waitinglist, la:lostanimal, fa:foundanimal keywords: onshelter/os, notforadoption, aco, banned, donors, deceased, vets, retailers, staff, fosterers, volunteers, homecheckers, members, activelost, activefound" : "", "inches" : "", "invalid" : "", "kg" : "", "lb" : "", "less" : "", "mins" : "", "months" : "kuud", "more" : "", "on" : "", "or" : "", "or estimated age in years" : "", "oz" : "", "to" : "", "today" : "", "treatments" : "ravid", "treatments, every" : "", "weekdays" : "", "weeks" : "nädal", "weeks after last contact." 
: "", "years" : "aastad", "yesterday" : "", "{0} (under {1} months)" : "", "{0} - {1} ({2} {3} aged {4})" : "", "{0} - {1} {2}" : "", "{0} - {1} {2} ({3}), contact {4} ({5}) - lost in {6}, postcode {7}, on {8}" : "{0} - {1} {2} ({3}), kontakt {4} ({5}) - kaotatod {6}, indeks {7}, {8}", "{0} animals successfully updated." : "", "{0} cannot be blank" : "", "{0} fine, paid" : "", "{0} fine, unpaid" : "", "{0} incurred in costs" : "", "{0} is running ({1}&#37; complete)." : "", "{0} payment records created." : "", "{0} received" : "", "{0} record(s) match the mail merge." : "", "{0} results." : "", "{0} rows affected." : "", "{0} selected" : "", "{0} treatments every {1} days" : "", "{0} treatments every {1} months" : "", "{0} treatments every {1} weekdays" : "", "{0} treatments every {1} weeks" : "", "{0} treatments every {1} years" : "", "{0} {1} ({2} treatments)" : "", "{0} {1} aged {2}" : "", "{0} {1} {2} aged {3}" : "", "{0} {1}: Moved from {2} to {3}" : "", "{0} {1}: adopted by {2}" : "", "{0} {1}: altered" : "", "{0} {1}: available for adoption" : "", "{0} {1}: died ({2})" : "", "{0} {1}: entered the shelter" : "", "{0} {1}: escaped" : "", "{0} {1}: euthanised ({2})" : "", "{0} {1}: fostered to {2}" : "", "{0} {1}: held" : "", "{0} {1}: microchipped" : "", "{0} {1}: not available for adoption" : "", "{0} {1}: quarantined" : "", "{0} {1}: received {2}" : "", "{0} {1}: reclaimed by {2}" : "", "{0} {1}: released" : "", "{0} {1}: reserved by {2}" : "", "{0} {1}: returned by {2}" : "", "{0} {1}: sent to retailer {2}" : "", "{0} {1}: stolen" : "", "{0} {1}: tested positive for FIV" : "", "{0} {1}: tested positive for FeLV" : "", "{0} {1}: tested positive for Heartworm" : "", "{0} {1}: transferred to {2}" : "", "{0}, Week {1}" : "", "{0}: Entered shelter {1}, Last changed on {2} by {3}. {4} {5} {6} aged {7}" : "", "{0}: closed {1} ({2})" : "", "{0}: opened {1}" : "", "{0}: waiting list - {1}" : "", "{0}: {1} {2} - {3} {4}" : "", "{2}: found in {1}: {0}" : "", "{2}: lost in {1}: {0}" : "", "{plural0} animal as dead on arrival" : "", "{plural0} animal control call due for followup today" : "", "{plural0} animal died" : "", "{plural0} animal entered the shelter" : "", "{plural0} animal has a hold ending today" : "", "{plural0} animal has been on the shelter longer than {0} months" : "", "{plural0} animal is not available for adoption" : "", "{plural0} animal was adopted" : "", "{plural0} animal was euthanized" : "", "{plural0} animal was reclaimed by its owner" : "", "{plural0} animal was transferred to another shelter" : "", "{plural0} day." : "", "{plural0} incomplete animal control call" : "", "{plural0} item of stock expires in the next month" : "", "{plural0} item of stock has expired" : "", "{plural0} medical treatment needs to be administered today" : "", "{plural0} month." : "", "{plural0} new online form submission" : "", "{plural0} person has an overdue payment" : "", "{plural0} person with an active reservation has not been homechecked" : "", "{plural0} potential match for a lost animal" : "", "{plural0} recent publisher run had errors" : "", "{plural0} reservation has been active over a week without adoption" : "", "{plural0} result found in {1} seconds. 
Order: {2}" : "", "{plural0} shelter animal has not been microchipped" : "", "{plural0} shelter animal has people looking for them" : "", "{plural0} test needs to be performed today" : "", "{plural0} transport does not have a driver assigned" : "", "{plural0} trap is overdue for return" : "", "{plural0} trial adoption has ended" : "", "{plural0} unaltered animal has been adopted in the last month" : "", "{plural0} undispatched animal control call" : "", "{plural0} unpaid fine" : "", "{plural0} urgent entry on the waiting list" : "", "{plural0} vaccination has expired" : "", "{plural0} vaccination needs to be administered today" : "", "{plural0} week." : "", "{plural0} year." : "", "{plural1} animal control calls due for followup today" : "", "{plural1} animals are not available for adoption" : "", "{plural1} animals died" : "", "{plural1} animals entered the shelter" : "", "{plural1} animals have been on the shelter longer than {0} months" : "", "{plural1} animals have holds ending today" : "", "{plural1} animals were adopted" : "", "{plural1} animals were dead on arrival" : "", "{plural1} animals were euthanized" : "", "{plural1} animals were reclaimed by their owners" : "", "{plural1} animals were transferred to other shelters" : "", "{plural1} days." : "", "{plural1} incomplete animal control calls" : "", "{plural1} items of stock expire in the next month" : "", "{plural1} items of stock have expired" : "", "{plural1} medical treatments need to be administered today" : "", "{plural1} months." : "", "{plural1} new online form submissions" : "", "{plural1} people have overdue payments" : "", "{plural1} people with active reservations have not been homechecked" : "", "{plural1} potential matches for lost animals" : "", "{plural1} recent publisher runs had errors" : "", "{plural1} reservations have been active over a week without adoption" : "", "{plural1} results found in {1} seconds. Order: {2}" : "", "{plural1} shelter animals have not been microchipped" : "", "{plural1} shelter animals have people looking for them" : "", "{plural1} tests need to be performed today" : "", "{plural1} transports do not have a driver assigned" : "", "{plural1} traps are overdue for return" : "", "{plural1} trial adoptions have ended" : "", "{plural1} unaltered animals have been adopted in the last month" : "", "{plural1} undispatched animal control calls" : "", "{plural1} unpaid fines" : "", "{plural1} urgent entries on the waiting list" : "", "{plural1} vaccinations have expired" : "", "{plural1} vaccinations need to be administered today" : "", "{plural1} weeks." : "", "{plural1} years." : "", "{plural2} animal control calls due for followup today" : "", "{plural2} animals are not available for adoption" : "", "{plural2} animals died" : "", "{plural2} animals entered the shelter" : "", "{plural2} animals have been on the shelter longer than {0} months" : "", "{plural2} animals have holds ending today" : "", "{plural2} animals were adopted" : "", "{plural2} animals were dead on arrival" : "", "{plural2} animals were euthanized" : "", "{plural2} animals were reclaimed by their owners" : "", "{plural2} animals were transferred to other shelters" : "", "{plural2} days." : "", "{plural2} incomplete animal control calls" : "", "{plural2} items of stock expire in the next month" : "", "{plural2} items of stock have expired" : "", "{plural2} medical treatments need to be administered today" : "", "{plural2} months." 
: "", "{plural2} new online form submissions" : "", "{plural2} people have overdue payments" : "", "{plural2} people with active reservations have not been homechecked" : "", "{plural2} potential matches for lost animals" : "", "{plural2} recent publisher runs had errors" : "", "{plural2} reservations have been active over a week without adoption" : "", "{plural2} results found in {1} seconds. Order: {2}" : "", "{plural2} shelter animals have not been microchipped" : "", "{plural2} shelter animals have people looking for them" : "", "{plural2} tests need to be performed today" : "", "{plural2} transports do not have a driver assigned" : "", "{plural2} traps are overdue for return" : "", "{plural2} trial adoptions have ended" : "", "{plural2} unaltered animals have been adopted in the last month" : "", "{plural2} undispatched animal control calls" : "", "{plural2} unpaid fines" : "", "{plural2} urgent entries on the waiting list" : "", "{plural2} vaccinations have expired" : "", "{plural2} vaccinations need to be administered today" : "", "{plural2} weeks." : "", "{plural2} years." : "", "{plural3} animal control calls due for followup today" : "", "{plural3} animals are not available for adoption" : "", "{plural3} animals died" : "", "{plural3} animals entered the shelter" : "", "{plural3} animals have been on the shelter longer than {0} months" : "", "{plural3} animals have holds ending today" : "", "{plural3} animals were adopted" : "", "{plural3} animals were dead on arrival" : "", "{plural3} animals were euthanized" : "", "{plural3} animals were reclaimed by their owners" : "", "{plural3} animals were transferred to other shelters" : "", "{plural3} days." : "", "{plural3} incomplete animal control calls" : "", "{plural3} items of stock expire in the next month" : "", "{plural3} items of stock have expired" : "", "{plural3} medical treatments need to be administered today" : "", "{plural3} months." : "", "{plural3} new online form submissions" : "", "{plural3} people have overdue payments" : "", "{plural3} people with active reservations have not been homechecked" : "", "{plural3} potential matches for lost animals" : "", "{plural3} recent publisher runs had errors" : "", "{plural3} reservations have been active over a week without adoption" : "", "{plural3} results found in {1} seconds. Order: {2}" : "", "{plural3} shelter animals have not been microchipped" : "", "{plural3} shelter animals have people looking for them" : "", "{plural3} tests need to be performed today" : "", "{plural3} transports do not have a driver assigned" : "", "{plural3} traps are overdue for return" : "", "{plural3} trial adoptions have ended" : "", "{plural3} unaltered animals have been adopted in the last month" : "", "{plural3} undispatched animal control calls" : "", "{plural3} unpaid fines" : "", "{plural3} urgent entries on the waiting list" : "", "{plural3} vaccinations have expired" : "", "{plural3} vaccinations need to be administered today" : "", "{plural3} weeks." : "", "{plural3} years." : "" }
bobintetley/asm3
src/asm3/locales/locale_et.py
Python
gpl-3.0
122,322
[ "Amber", "VisIt" ]
3501d3c3dabf55a3d63a30b1187c508ad1b8f3469d5b878af22685c64918f525
""" surface - Grids table data using adjustable tension continuous curvature splines. """ import xarray as xr from pygmt.clib import Session from pygmt.exceptions import GMTInvalidInput from pygmt.helpers import ( GMTTempFile, build_arg_string, data_kind, dummy_context, fmt_docstring, kwargs_to_strings, use_alias, ) @fmt_docstring @use_alias( I="spacing", R="region", G="outfile", V="verbose", a="aspatial", f="coltypes", r="registration", ) @kwargs_to_strings(R="sequence") def surface(x=None, y=None, z=None, data=None, **kwargs): r""" Grids table data using adjustable tension continuous curvature splines. Surface reads randomly-spaced (x,y,z) triples and produces gridded values z(x,y) by solving: .. math:: (1 - t)\nabla^2(z)+t\nabla(z) = 0 where :math:`t` is a tension factor between 0 and 1, and :math:`\nabla` indicates the Laplacian operator. Takes a matrix, xyz triples, or a file name as input. Must provide either ``data`` or ``x``, ``y``, and ``z``. Full option list at :gmt-docs:`surface.html` {aliases} Parameters ---------- x/y/z : 1d arrays Arrays of x and y coordinates and values z of the data points. data : str or 2d array Either a data file name or a 2d numpy array with the tabular data. {I} region : str or list *xmin/xmax/ymin/ymax*\[**+r**][**+u**\ *unit*]. Specify the region of interest. outfile : str Optional. The file name for the output netcdf file with extension .nc to store the grid in. {V} {a} {f} {r} Returns ------- ret: xarray.DataArray or None Return type depends on whether the ``outfile`` parameter is set: - :class:`xarray.DataArray`: if ``outfile`` is not set - None if ``outfile`` is set (grid output will be stored in file set by ``outfile``) """ kind = data_kind(data, x, y, z) if kind == "vectors" and z is None: raise GMTInvalidInput("Must provide z with x and y.") with GMTTempFile(suffix=".nc") as tmpfile: with Session() as lib: if kind == "file": file_context = dummy_context(data) elif kind == "matrix": file_context = lib.virtualfile_from_matrix(data) elif kind == "vectors": file_context = lib.virtualfile_from_vectors(x, y, z) else: raise GMTInvalidInput("Unrecognized data type: {}".format(type(data))) with file_context as infile: if "G" not in kwargs.keys(): # if outfile is unset, output to tmpfile kwargs.update({"G": tmpfile.name}) outfile = kwargs["G"] arg_str = " ".join([infile, build_arg_string(kwargs)]) lib.call_module(module="surface", args=arg_str) if outfile == tmpfile.name: # if user did not set outfile, return DataArray with xr.open_dataarray(outfile) as dataarray: result = dataarray.load() _ = result.gmt # load GMTDataArray accessor information elif outfile != tmpfile.name: # if user sets an outfile, return None result = None return result
GenericMappingTools/gmt-python
pygmt/src/surface.py
Python
bsd-3-clause
3,304
[ "NetCDF" ]
7ab6290a7f8e1f3fff61e6ca503f8b127f642e999f20f25d41853d6ad04a65f8
#!/usr/bin/env python # Copyright 2014-2018 The PySCF Developers. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from functools import reduce import numpy from pyscf import gto from pyscf import lib from pyscf import scf from pyscf.lo import iao, ibo, orth, pipek, vvo mol = gto.Mole() mol.atom = ''' O 0. 0. 0 h 0. -0.757 0.587 h 0. 0.757 0.587''' mol.basis = 'unc-sto3g' mol.verbose = 5 mol.output = '/dev/null' mol.build() class KnownValues(unittest.TestCase): def test_vvo(self): mf = scf.RHF(mol).run() nocc = numpy.sum(mf.mo_occ>0) b = vvo.vvo(mol, mf.mo_coeff[:,0:nocc], mf.mo_coeff[:,nocc:]) s_b = reduce(numpy.dot, (b.T, mf.get_ovlp(), b)) self.assertTrue(abs(s_b.diagonal() - 1).max() < 1e-9) pop = pipek.atomic_pops(mol, b) z = numpy.einsum('xii,xii->', pop, pop) self.assertAlmostEqual(z, 0.6695907625196215, 5) def test_livvo(self): mf = scf.RHF(mol).run() nocc = numpy.sum(mf.mo_occ>0) b = vvo.livvo(mol, mf.mo_coeff[:,0:nocc], mf.mo_coeff[:,nocc:], exponent=4) s_b = reduce(numpy.dot, (b.T, mf.get_ovlp(), b)) self.assertTrue(abs(s_b.diagonal() - 1).max() < 1e-9) pop = pipek.atomic_pops(mol, b) z = numpy.einsum('xii,xii->', pop, pop) self.assertAlmostEqual(z, 1.073138251815934, 5) b = vvo.livvo(mol, mf.mo_coeff[:,0:nocc], mf.mo_coeff[:,nocc:], exponent=2) s_b = reduce(numpy.dot, (b.T, mf.get_ovlp(), b)) self.assertTrue(abs(s_b.diagonal() - 1).max() < 1e-9) pop = pipek.atomic_pops(mol, b) z = numpy.einsum('xii,xii->', pop, pop) self.assertAlmostEqual(z, 1.073138251815934, 5) def test_livvo_PM(self): mf = scf.RHF(mol).run() nocc = numpy.sum(mf.mo_occ>0) b = vvo.livvo(mol, mf.mo_coeff[:,0:nocc], mf.mo_coeff[:,nocc:], locmethod='PM', exponent=4).kernel() pop = pipek.atomic_pops(mol, b) z = numpy.einsum('xii,xii->', pop, pop) self.assertAlmostEqual(z, 0.6695907625196215, 5) b = vvo.livvo(mol, mf.mo_coeff[:,0:nocc], mf.mo_coeff[:,nocc:], locmethod='PM', exponent=2).kernel() pop = pipek.atomic_pops(mol, b) z = numpy.einsum('xii,xii->', pop, pop) self.assertAlmostEqual(z, 0.6695907625196215, 5) if __name__ == "__main__": print("Full tests for vvo") unittest.main()
gkc1000/pyscf
pyscf/lo/test/test_vvo.py
Python
apache-2.0
2,957
[ "PySCF" ]
b5a7aa4d2479eddd10ccd0e39b7829a913f413895a55a3664096fcf994811f78
import sys import os sys.path.insert(0, os.path.join( os.path.dirname(os.path.realpath(__file__)), '..')) import sympy as sp from crnpy.crn import CRN, from_react_file from crnpy.parsereaction import parse_expr __author__ = "Elisa Tonello" __copyright__ = "Copyright (c) 2016, Elisa Tonello" __license__ = "BSD" __version__ = "0.0.1" # Concerted model of cooperativity. # Ingalls, Brian. "Mathematical Modelling in Systems Biology: An Introduction.", 2013. # 3.7.11 filename = "data/reactions/concerted_coop" crn = from_react_file(filename) print(crn.derivative('2*R2X2 + 2*T2X2 + R2X + T2X + X')) crn.inspect(True) crn.rapid_eq('R2X2', 'R2X + X') crn.rapid_eq('T2X2', 'T2X + X') crn.rapid_eq('R2X', 'R2 + X') crn.rapid_eq('T2X', 'T2 + X') crn.rapid_eq('R2', 'T2') print("") print(crn.removed_species) print("") saturation = parse_expr('(T2X + R2X + 2*T2X2 + 2*R2X2)/2/(T2 + R2 + T2X + R2X + T2X2 + R2X2)') for variable, expr in crn.removed_species: saturation = saturation.subs(variable, expr).simplify() print("Saturation: {}".format(saturation)) Y = parse_expr("(K*X/KT*(1+X/KT)+X/KR*(1+X/KR))/(K*(1+X/KT)**2+(1+X/KR)**2)"). \ subs("K", parse_expr("k_1/k1")). \ subs("KT", parse_expr("k_2/k2")). \ subs("KR", parse_expr("k_3/k3")).simplify() print(sp.simplify(saturation.expand() - Y.expand() ) == 0)
etonello/crnpy
examples/concerted_coop.py
Python
bsd-3-clause
1,379
[ "Brian" ]
29396b68a8d1bf1a2fdb192d567b6674185959ede0dea7a975fd9e816424d45b
#!/usr/bin/env python # # @BEGIN LICENSE # # Psi4: an open-source quantum chemistry software package # # Copyright (c) 2007-2021 The Psi4 Developers. # # The copyrights for code used from other parties are included in # the corresponding files. # # This file is part of Psi4. # # Psi4 is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, version 3. # # Psi4 is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License along # with Psi4; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # # @END LICENSE # import glob import os import sys sys.path.append(os.path.dirname(__file__) + '/../../../driver') try: import qcdb except ImportError: print("""Cannot load qcdb python module. Run this script in situ or append the psi4/share/psi4/scripts directory to $PYTHONPATH.""") exit(1) """ Utility: This script converts a set of geometry files in XYZ format into a python database file for psi4 and qcdb scripts. Instructions: Detailed instructions may be found at http://sirius.chem.vt.edu/psi4manual/latest/quickadddatabase.html . In short, move all XYZ files intended for a database into a directory and run this script from that directory. Answer a few questions about the intended database. Edit the resulting database.py file if necessary, then copy it into psi4/share/psi4/databases/ . Its contents can be accessed as normal through the db() wrapper with no further configuration or recompiling. Created: Monday, December 21, 2009, LAB Last Modified: Tuesday, September 10, 2013, LAB """ # instructions print(""" Welcome to ixyz2database. Just fill in the variables when prompted. Hit ENTER to accept default. Strings should not be in quotes. Elements in arrays should be space-delimited. """) # query database name print(""" Name your database. Recommend 3-8 characters, all capitalized letters. e.g., MINE or BZ6 """) user_obedient = False while not user_obedient: dbse = input(' dbse = ').strip() if dbse.isalnum(): user_obedient = True # query file extension print(""" XYZ file extension. All files with this extension in the current directory will be processed Additionally, all files with extension p4m in the current dir will be processed as psi4 mol format """) fext = input(' fext = [xyz] ').strip() if fext == "": fext = 'xyz' # query xyz file comment line print(""" What should line two of the XYZ file be used for (needn't be specially formatted in all files) [cgmp] Treat first item in line as system charge, second as multiplicity, rest as comment [comment] Treat content as text for the comment line [trash] Ignore content """) user_obedient = False while not user_obedient: line2 = input(' line2 = [cgmp] ').strip().lower() if line2 == "": line2 = 'cgmp' if line2 == 'comment' or line2 == 'cgmp' or line2 == 'trash': user_obedient = True # query closed shell print(""" Are open-shell or non-singlets are present among your systems (or subsystems in the case of dimers)? """) isOS = qcdb.query_yes_no(' isOS =', False) # query database type print(""" What is the nature of the systems in your incipient database? 
[1] I have a bunch of plain molecules (no need to act on any subsystems) that I want to be able to act upon in parallel. [2] I have a bunch of molecules that I want to form into a database whose reference quantity corresponds to various combinations thereof. [3] I have a bunch of dimers (only dimer, no monomer, files should be present) that I want to form into a database whose reference quantity is interaction energy. Your final database may of course resemble any combination of these choices. This is but a humble script to get you started. """) user_obedient = False while not user_obedient: route = input(' route = ').strip().lower() if route.isdigit(): route = int(route) if route == 1 or route == 2 or route == 3: user_obedient = True # query number of reactions if route == 2: print(""" How many reactions (things that have a reference quantity, as opposed to reagents that have a geometry) are in the database? """) user_obedient = False while not user_obedient: Nrxn = input(' Nrxn = ').strip().lower() if Nrxn.isdigit(): Nrxn = int(Nrxn) user_obedient = True else: Nrxn = 1 # TODO really need? # initialize containers spy = "" gpy = "" HRXN = range(1, Nrxn + 1) BINDRXN = {} TAGLRXN = {} for rxn in HRXN: BINDRXN[rxn] = None # "nan" ? TODO TAGLRXN[rxn] = 'Reaction %s' % (rxn) # reagent geometry section gpy += "\n# <<< Geometry Specification Strings >>>\n" gpy += "GEOS = {}\n\n" count = 0 HRGT = [] TAGLRGT = {} BINDRGT = {} print("\n%-25s %6s %6s %6s %6s %6s\t\t%s\n" % ("system", "CHGsyst", "MLPsyst", "Natom", "Nmol1", "Nmol2", "Fragmentation Pattern")) for xyzfile in (glob.glob('*.' + fext) + glob.glob('*.p4m')): # ascertain system name and open file system = os.path.splitext(xyzfile)[0] HRGT.append(system) f = open(xyzfile, 'r') text = f.readlines() f.close() # use Molecule object to read geometry in xyz file mol = qcdb.Molecule.from_string(''.join(text), fix_com=True, fix_orientation=True) Nsyst = mol.natom() # alter second line if line2 == 'cgmp': pass elif line2 == 'comment': mol.set_molecular_charge(0) mol.fragment_charges[0] = 0 mol.set_multiplicity(1) mol.fragment_multiplicities[0] = 1 mol.tagline = text[1].strip() elif line2 == 'trash': mol.set_molecular_charge(0) mol.fragment_charges[0] = 0 mol.set_multiplicity(1) mol.fragment_multiplicities[0] = 1 mol.tagline = "" CHGsyst = mol.molecular_charge() MLPsyst = mol.multiplicity() TAGLRGT[system] = mol.tagline BINDRGT[system] = None # "nan" ? # TODO if route == 3 and mol.nfragments() == 1: frag_pattern, mol = mol.BFS(return_molecule=True) Nmol1 = mol.fragments[0][1] - mol.fragments[0][0] + 1 Nmol2 = mol.fragments[1][1] - mol.fragments[1][0] + 1 print("%-25s %6d %6d %6d %6d %6d\t\t%s" % (system, CHGsyst, MLPsyst, Nsyst, Nmol1, Nmol2, frag_pattern)) gpy += "GEOS['%%s-%%s-%%s' %% (dbse, '%s', 'dimer')] = qcdb.Molecule(\"\"\"\n" % (str(system)) if mol.nfragments() != 2: print("ERROR: 2 fragments not detected for system %s." % (system)) print(" If you really have trimers or above, contact LAB to modify this script.\n") sys.exit() else: print("%-25s %6d %6d %6d %6d %6d" % (system, CHGsyst, MLPsyst, Nsyst, Nsyst, 0)) gpy += "GEOS['%%s-%%s-%%s' %% (dbse, '%s', 'reagent')] = qcdb.Molecule(\"\"\"\n" % (str(system)) gpy += mol.create_psi4_string_from_molecule() gpy += """\"\"\")\n\n""" count += 1 Nrgt = len(HRGT) if Nrgt != count: print("ERROR: discrepancy in counting systems $Nrgt vs $count!\n") sys.exit() # python database file docstring = """\"\"\" | Database of <description of members and reference energy type>. | Geometries from <Reference>. 
| Reference interaction energies from <Reference>. """ if route == 3: docstring += """ - **cp** ``'off'`` <erase this comment and after unless on is a valid option> || ``'on'`` - **rlxd** ``'off'`` <erase this comment and after unless on is valid option> || ``'on'`` """ docstring += """ - **benchmark** - ``'<benchmark_name>'`` <Reference>. - |dl| ``'<default_benchmark_name>'`` |dr| <Reference>. - **subset** - ``'small'`` <members_description> - ``'large'`` <members_description> - ``'<subset>'`` <members_description> \"\"\" """ spy += docstring spy += 'import re\n' spy += 'import qcdb\n' spy += "\n# <<< %s Database Module >>>\n" % (dbse) spy += "dbse = %s\n" % ("'" + dbse + "'") if isOS == True: spy += "isOS = '%s'\n" % (isOS) spy += "\n# <<< Database Members >>>\n" spy += "HRXN = [" if route == 1: for rgt in HRGT: spy += "'%s', " % (rgt) elif route == 2: for rxn in HRXN: spy += "'%s', " % (rxn) elif route == 3: for rgt in HRGT: spy += "'%s', " % (rgt) spy += "]\n" spy += "HRXN_SM = []\n" spy += "HRXN_LG = []\n" spy += "\n# <<< Chemical Systems Involved >>>\n" spy += "RXNM = {} # reaction matrix of reagent contributions per reaction\n" spy += "ACTV = {} # order of active reagents per reaction\n" if route == 1: for rgt in HRGT: spy += """ACTV['%%s-%%s' %% (dbse, %-23s )] = """ % ("'" + rgt + "'") spy += """['%%s-%%s-reagent' %% (dbse, %s)]\n""" % ("'" + rgt + "'") spy += """RXNM['%%s-%%s' %% (dbse, %-23s )] = """ % ("'" + rgt + "'") spy += """dict(zip(ACTV['%%s-%%s' %% (dbse, %s)], [+1]))\n\n""" % ("'" + rgt + "'") elif route == 2: for rxn in HRXN: spy += """ACTV['%%s-%%s' %% (dbse, %-23s )] = """ % ("'" + str(rxn) + "'") spy += """['%%s-%%s-reagent' %% (dbse, %s),\n""" % ("''") spy += """%62s '%%s-%%s-reagent' %% (dbse, %s),\n""" % ("", "''") spy += """%62s '%%s-%%s-reagent' %% (dbse, %s) ]\n""" % ("", "''") spy += """RXNM['%%s-%%s' %% (dbse, %-23s )] = """ % ("'" + str(rxn) + "'") spy += """dict(zip(ACTV['%%s-%%s' %% (dbse, %s)], []))\n\n""" % ("'" + str(rxn) + "'") elif route == 3: pass spy += "ACTV_CP = {} # order of active reagents per counterpoise-corrected reaction\n" spy += "ACTV_SA = {} # order of active reagents for non-supermolecular calculations\n" spy += "for rxn in HRXN:\n\n" spy += " RXNM[ '%s-%s' % (dbse, rxn)] = {'%s-%s-dimer' % (dbse, rxn) : +1,\n" spy += " '%s-%s-monoA-CP' % (dbse, rxn) : -1,\n" spy += " '%s-%s-monoB-CP' % (dbse, rxn) : -1,\n" spy += " '%s-%s-monoA-unCP' % (dbse, rxn) : -1,\n" spy += " '%s-%s-monoB-unCP' % (dbse, rxn) : -1 }\n\n" spy += " ACTV_SA['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn) ]\n\n" spy += " ACTV_CP['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),\n" spy += " '%s-%s-monoA-CP' % (dbse, rxn),\n" spy += " '%s-%s-monoB-CP' % (dbse, rxn) ]\n\n" spy += " ACTV[ '%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),\n" spy += " '%s-%s-monoA-unCP' % (dbse, rxn),\n" spy += " '%s-%s-monoB-unCP' % (dbse, rxn) ]\n\n" spy += "# <<< Reference Values [kcal/mol] >>>\n" spy += "BIND = {}\n" #print SPY_OUT "nan = float('NaN')\n"; if route == 1: for rgt in HRGT: spy += """BIND['%%s-%%s' %% (dbse, %-23s )] = """ % ("'" + rgt + "'") spy += """%8.3f\n""" % (0.0) # TODO BINDRGT[rgt])) elif route == 2: for rxn in HRXN: pass spy += """BIND['%%s-%%s' %% (dbse, %-23s )] = """ % ("'" + str(rxn) + "'") spy += """%8.3f\n""" % (0.0) # TODO BINDRGT[rxn])) elif route == 3: for rgt in HRGT: pass spy += """BIND['%%s-%%s' %% (dbse, %-23s )] = """ % ("'" + rgt + "'") spy += """%8.3f\n""" % (0.0) # TODO BINDRGT[rgt])) # write comment line section spy += "\n# <<< 
Comment Lines >>>\n" spy += "TAGL = {}\n" if route == 1: for rgt in HRGT: spy += """TAGL['%%s-%%s' %% (dbse, %-23s )] = """ % ("'" + rgt + "'") spy += """\"\"\"%s \"\"\"\n""" % (TAGLRGT[rgt]) spy += """TAGL['%%s-%%s-reagent' %% (dbse, %-23s )] = """ % ("'" + rgt + "'") spy += """\"\"\"%s \"\"\"\n""" % (TAGLRGT[rgt]) elif route == 2: for rxn in HRXN: spy += """TAGL['%%s-%%s' %% (dbse, %-23s )] = """ % ("'" + str(rxn) + "'") spy += """\"\"\"%s \"\"\"\n""" % (TAGLRXN[rxn]) for rgt in HRGT: spy += """TAGL['%%s-%%s-reagent' %% (dbse, %-23s )] = """ % ("'" + rgt + "'") spy += """\"\"\"%s \"\"\"\n""" % (TAGLRGT[rgt]) elif route == 3: for rgt in HRGT: spy += """TAGL['%%s-%%s' %% (dbse, %-23s )] = """ % ("'" + rgt + "'") spy += """\"\"\"%s \"\"\"\n""" % (TAGLRGT[rgt]) spy += """TAGL['%%s-%%s-dimer' %% (dbse, %-23s )] = """ % ("'" + rgt + "'") spy += """\"\"\"%s \"\"\"\n""" % ('Dimer from ' + TAGLRGT[rgt]) spy += """TAGL['%%s-%%s-monoA-CP' %% (dbse, %-23s )] = """ % ("'" + rgt + "'") spy += """\"\"\"%s \"\"\"\n""" % ('Monomer A from ' + TAGLRGT[rgt]) spy += """TAGL['%%s-%%s-monoB-CP' %% (dbse, %-23s )] = """ % ("'" + rgt + "'") spy += """\"\"\"%s \"\"\"\n""" % ('Monomer B from ' + TAGLRGT[rgt]) spy += """TAGL['%%s-%%s-monoA-unCP' %% (dbse, %-23s )] = """ % ("'" + rgt + "'") spy += """\"\"\"%s \"\"\"\n""" % ('Monomer A from ' + TAGLRGT[rgt]) spy += """TAGL['%%s-%%s-monoB-unCP' %% (dbse, %-23s )] = """ % ("'" + rgt + "'") spy += """\"\"\"%s \"\"\"\n""" % ('Monomer B from ' + TAGLRGT[rgt]) # write subset geometry section if route == 3: gpy += "# <<< Derived Geometry Strings >>>\n" gpy += "for rxn in HRXN:\n" gpy += " GEOS['%s-%s-monoA-unCP' % (dbse, rxn)] = " gpy += "GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(1)\n" gpy += " GEOS['%s-%s-monoB-unCP' % (dbse, rxn)] = " gpy += "GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(2)\n" gpy += " GEOS['%s-%s-monoA-CP' % (dbse, rxn)] = " gpy += "GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(1, 2)\n" gpy += " GEOS['%s-%s-monoB-CP' % (dbse, rxn)] = " gpy += "GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(2, 1)\n" # arrange intermediate strings into final database file fpy = open('%s.py' % (dbse), 'w') fpy.write(spy) fpy.write(gpy) fpy.close() # display customized advice for finishing off the database final = """ ** Congratulations, your database file %s.py has been constructed! ** To have a minimally functioning database, do the following: """ % (dbse) if line2 == 'comment' and isOS == True: final += """ * If not all neutral singlets, fill in correct charge and multiplicity for all reagents. """ if line2 == 'comment' and isOS == False: final += """ * If not all neutral, fill in correct charge for all reagents. """ if route == 3 and line2 == 'cgmp': final += """ * The charge and multiplicity read in from line2 of the xyz files has been assigned to fragmentA, leaving fragmentB as a neutral singlet. If this is incorrect for any reagents, reapportion the charge and multiplicity correctly between fragments A & B. """ if route == 3 and line2 == 'comment': final += """ * If dimer and both subsystems are not neutral singlets, fill in correct charge and multiplicity for each subsystem. """ if route == 2: final += """ * Define the reagents that contribute to reach reaction by filling in the empty single quotes in ACTV. Add or delete lines as necessary for each reaction if more or fewer than three reagents contribute. See NHTBH.py as an example. 
* Define the mathematical contribution of reagents to reactions by filling in a number (most often +1 or -1) for each reagent to the RXNM of each reaction. See NHTBH.py as an example. """ final += """ * Make sure the Psi4 driver can find your new database. M %s.py into INSTALLED_DIRECTORY/share/psi4/databases . Alternatively, add the directory containing %s.py into PYTHONPATH . """ % (dbse, dbse, dbse) final += """ ** To enhance the functionality/documentation of your database, do the following: * Rearrange the order of reactions in HRXN, as this will define the order for the database. * Fill in the skeleton docstring at top of file, adding sources for geometries and any reference data. This info will show up in the online documentation. * Fill in the comment lines of TAGL in plain text. These show up as banners in job output files. * Fill in reference values (in kcal/mol) into BIND. * If multiple sets of reference values are available, define each in an array BIND_ALTREF so that they can be called in a psi4 input file as benchmark='ALTREF'. Add the new reference to the docstring. See S22.py as an example. * Fill in the least computationally expensive 2-3 reactions into HRXN_SM and the most expensive into HRXN_LG so that they can be called in a psi4 input file as subset='small' or subset='large'. * Define subsets of reactions such as in an array SUBSETARRAY=['reaction', 'reaction'] so that they can be called in a psi4 input file as subset='SUBSETARRAY'. Add the new subset option to to the docstring. See NBC10.py for a simple example or CFLOW.py for a complex example. """ print(final)
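For orientation, here is a hedged sketch of the kind of module the script above emits for the plain-molecule route ([1]); the database name 'MINE' and system name 'h2o' are hypothetical, and the real output additionally contains GEOS entries built from qcdb.Molecule, the docstring skeleton, and the subset lists described in the closing advice.

# Hypothetical excerpt of a generated database module for one plain molecule.
# The container names (HRXN, ACTV, RXNM, BIND, TAGL) mirror the spy strings
# assembled by ixyz2database.py above.
dbse = 'MINE'
HRXN = ['h2o']
ACTV, RXNM, BIND, TAGL = {}, {}, {}, {}

ACTV['%s-%s' % (dbse, 'h2o')] = ['%s-%s-reagent' % (dbse, 'h2o')]
RXNM['%s-%s' % (dbse, 'h2o')] = dict(zip(ACTV['%s-%s' % (dbse, 'h2o')], [+1]))
BIND['%s-%s' % (dbse, 'h2o')] = 0.000   # reference value to be filled in by hand
TAGL['%s-%s' % (dbse, 'h2o')] = """Water monomer """

print(ACTV, RXNM, BIND, TAGL)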
psi-rking/psi4
psi4/share/psi4/scripts/ixyz2database.py
Python
lgpl-3.0
17,782
[ "Psi4" ]
5ef0d5916fb343875d450642cc4f5f23e28a52091a4cb7485fd959b2d643a78e
# -*- coding: utf-8 -*- # Copyright (c) 2018, earthians and contributors # For license information, please see license.txt from __future__ import unicode_literals import frappe import datetime from frappe import _ import math from frappe.utils import time_diff_in_hours, rounded, getdate, add_days from erpnext.healthcare.doctype.healthcare_settings.healthcare_settings import get_income_account from erpnext.healthcare.doctype.fee_validity.fee_validity import create_fee_validity, update_fee_validity from erpnext.healthcare.doctype.lab_test.lab_test import create_multiple @frappe.whitelist() def get_healthcare_services_to_invoice(patient): patient = frappe.get_doc("Patient", patient) if patient: if patient.customer: item_to_invoice = [] patient_appointments = frappe.get_list("Patient Appointment",{'patient': patient.name, 'invoiced': False}, order_by="appointment_date") if patient_appointments: fee_validity_details = [] valid_days = frappe.db.get_value("Healthcare Settings", None, "valid_days") max_visit = frappe.db.get_value("Healthcare Settings", None, "max_visit") for patient_appointment in patient_appointments: patient_appointment_obj = frappe.get_doc("Patient Appointment", patient_appointment['name']) if patient_appointment_obj.procedure_template: if frappe.db.get_value("Clinical Procedure Template", patient_appointment_obj.procedure_template, "is_billable") == 1: item_to_invoice.append({'reference_type': 'Patient Appointment', 'reference_name': patient_appointment_obj.name, 'service': patient_appointment_obj.procedure_template}) else: practitioner_exist_in_list = False skip_invoice = False if fee_validity_details: for validity in fee_validity_details: if validity['practitioner'] == patient_appointment_obj.practitioner: practitioner_exist_in_list = True if validity['valid_till'] >= patient_appointment_obj.appointment_date: validity['visits'] = validity['visits']+1 if int(max_visit) > validity['visits']: skip_invoice = True if not skip_invoice: validity['visits'] = 1 validity['valid_till'] = patient_appointment_obj.appointment_date + datetime.timedelta(days=int(valid_days)) if not practitioner_exist_in_list: valid_till = patient_appointment_obj.appointment_date + datetime.timedelta(days=int(valid_days)) visits = 0 validity_exist = validity_exists(patient_appointment_obj.practitioner, patient_appointment_obj.patient) if validity_exist: fee_validity = frappe.get_doc("Fee Validity", validity_exist[0][0]) valid_till = fee_validity.valid_till visits = fee_validity.visited fee_validity_details.append({'practitioner': patient_appointment_obj.practitioner, 'valid_till': valid_till, 'visits': visits}) if not skip_invoice: practitioner_charge = 0 income_account = None service_item = None if patient_appointment_obj.practitioner: service_item, practitioner_charge = service_item_and_practitioner_charge(patient_appointment_obj) income_account = get_income_account(patient_appointment_obj.practitioner, patient_appointment_obj.company) item_to_invoice.append({'reference_type': 'Patient Appointment', 'reference_name': patient_appointment_obj.name, 'service': service_item, 'rate': practitioner_charge, 'income_account': income_account}) encounters = frappe.get_list("Patient Encounter", {'patient': patient.name, 'invoiced': False, 'docstatus': 1}) if encounters: for encounter in encounters: encounter_obj = frappe.get_doc("Patient Encounter", encounter['name']) if not encounter_obj.appointment: practitioner_charge = 0 income_account = None service_item = None if encounter_obj.practitioner: service_item, 
practitioner_charge = service_item_and_practitioner_charge(encounter_obj) income_account = get_income_account(encounter_obj.practitioner, encounter_obj.company) item_to_invoice.append({'reference_type': 'Patient Encounter', 'reference_name': encounter_obj.name, 'service': service_item, 'rate': practitioner_charge, 'income_account': income_account}) lab_tests = frappe.get_list("Lab Test", {'patient': patient.name, 'invoiced': False}) if lab_tests: for lab_test in lab_tests: lab_test_obj = frappe.get_doc("Lab Test", lab_test['name']) if frappe.db.get_value("Lab Test Template", lab_test_obj.template, "is_billable") == 1: item_to_invoice.append({'reference_type': 'Lab Test', 'reference_name': lab_test_obj.name, 'service': frappe.db.get_value("Lab Test Template", lab_test_obj.template, "item")}) lab_rxs = frappe.db.sql("""select lp.name from `tabPatient Encounter` et, `tabLab Prescription` lp where et.patient=%s and lp.parent=et.name and lp.lab_test_created=0 and lp.invoiced=0""", (patient.name)) if lab_rxs: for lab_rx in lab_rxs: rx_obj = frappe.get_doc("Lab Prescription", lab_rx[0]) if rx_obj.lab_test_code and (frappe.db.get_value("Lab Test Template", rx_obj.lab_test_code, "is_billable") == 1): item_to_invoice.append({'reference_type': 'Lab Prescription', 'reference_name': rx_obj.name, 'service': frappe.db.get_value("Lab Test Template", rx_obj.lab_test_code, "item")}) procedures = frappe.get_list("Clinical Procedure", {'patient': patient.name, 'invoiced': False}) if procedures: for procedure in procedures: procedure_obj = frappe.get_doc("Clinical Procedure", procedure['name']) if not procedure_obj.appointment: if procedure_obj.procedure_template and (frappe.db.get_value("Clinical Procedure Template", procedure_obj.procedure_template, "is_billable") == 1): item_to_invoice.append({'reference_type': 'Clinical Procedure', 'reference_name': procedure_obj.name, 'service': frappe.db.get_value("Clinical Procedure Template", procedure_obj.procedure_template, "item")}) procedure_rxs = frappe.db.sql("""select pp.name from `tabPatient Encounter` et, `tabProcedure Prescription` pp where et.patient=%s and pp.parent=et.name and pp.procedure_created=0 and pp.invoiced=0 and pp.appointment_booked=0""", (patient.name)) if procedure_rxs: for procedure_rx in procedure_rxs: rx_obj = frappe.get_doc("Procedure Prescription", procedure_rx[0]) if frappe.db.get_value("Clinical Procedure Template", rx_obj.procedure, "is_billable") == 1: item_to_invoice.append({'reference_type': 'Procedure Prescription', 'reference_name': rx_obj.name, 'service': frappe.db.get_value("Clinical Procedure Template", rx_obj.procedure, "item")}) procedures = frappe.get_list("Clinical Procedure", {'patient': patient.name, 'invoice_separately_as_consumables': True, 'consumption_invoiced': False, 'consume_stock': True, 'status': 'Completed'}) if procedures: service_item = get_healthcare_service_item('clinical_procedure_consumable_item') if not service_item: msg = _(("Please Configure {0} in ").format("Clinical Procedure Consumable Item") \ + """<b><a href="#Form/Healthcare Settings">Healthcare Settings</a></b>""") frappe.throw(msg) for procedure in procedures: procedure_obj = frappe.get_doc("Clinical Procedure", procedure['name']) item_to_invoice.append({'reference_type': 'Clinical Procedure', 'reference_name': procedure_obj.name, 'service': service_item, 'rate': procedure_obj.consumable_total_amount, 'description': procedure_obj.consumption_details}) inpatient_services = frappe.db.sql("""select io.name, io.parent from `tabInpatient Record` ip, 
`tabInpatient Occupancy` io where ip.patient=%s and io.parent=ip.name and io.left=1 and io.invoiced=0""", (patient.name)) if inpatient_services: for inpatient_service in inpatient_services: inpatient_occupancy = frappe.get_doc("Inpatient Occupancy", inpatient_service[0]) service_unit_type = frappe.get_doc("Healthcare Service Unit Type", frappe.db.get_value("Healthcare Service Unit", inpatient_occupancy.service_unit, "service_unit_type")) if service_unit_type and service_unit_type.is_billable == 1: hours_occupied = time_diff_in_hours(inpatient_occupancy.check_out, inpatient_occupancy.check_in) qty = 0.5 if hours_occupied > 0: actual_qty = hours_occupied / service_unit_type.no_of_hours floor = math.floor(actual_qty) decimal_part = actual_qty - floor if decimal_part > 0.5: qty = rounded(floor + 1, 1) elif decimal_part < 0.5 and decimal_part > 0: qty = rounded(floor + 0.5, 1) if qty <= 0: qty = 0.5 item_to_invoice.append({'reference_type': 'Inpatient Occupancy', 'reference_name': inpatient_occupancy.name, 'service': service_unit_type.item, 'qty': qty}) return item_to_invoice else: frappe.throw(_("The Patient {0} do not have customer refrence to invoice").format(patient.name)) def service_item_and_practitioner_charge(doc): is_ip = doc_is_ip(doc) if is_ip: service_item = get_practitioner_service_item(doc.practitioner, "inpatient_visit_charge_item") if not service_item: service_item = get_healthcare_service_item("inpatient_visit_charge_item") else: service_item = get_practitioner_service_item(doc.practitioner, "op_consulting_charge_item") if not service_item: service_item = get_healthcare_service_item("op_consulting_charge_item") if not service_item: throw_config_service_item(is_ip) practitioner_charge = get_practitioner_charge(doc.practitioner, is_ip) if not practitioner_charge: throw_config_practitioner_charge(is_ip, doc.practitioner) return service_item, practitioner_charge def throw_config_service_item(is_ip): service_item_lable = "Out Patient Consulting Charge Item" if is_ip: service_item_lable = "Inpatient Visit Charge Item" msg = _(("Please Configure {0} in ").format(service_item_lable) \ + """<b><a href="#Form/Healthcare Settings">Healthcare Settings</a></b>""") frappe.throw(msg) def throw_config_practitioner_charge(is_ip, practitioner): charge_name = "OP Consulting Charge" if is_ip: charge_name = "Inpatient Visit Charge" msg = _(("Please Configure {0} for Healthcare Practitioner").format(charge_name) \ + """ <b><a href="#Form/Healthcare Practitioner/{0}">{0}</a></b>""".format(practitioner)) frappe.throw(msg) def get_practitioner_service_item(practitioner, service_item_field): return frappe.db.get_value("Healthcare Practitioner", practitioner, service_item_field) def get_healthcare_service_item(service_item_field): return frappe.db.get_value("Healthcare Settings", None, service_item_field) def doc_is_ip(doc): is_ip = False if doc.inpatient_record: is_ip = True return is_ip def get_practitioner_charge(practitioner, is_ip): if is_ip: practitioner_charge = frappe.db.get_value("Healthcare Practitioner", practitioner, "inpatient_visit_charge") else: practitioner_charge = frappe.db.get_value("Healthcare Practitioner", practitioner, "op_consulting_charge") if practitioner_charge: return practitioner_charge return False def manage_invoice_submit_cancel(doc, method): if doc.items: for item in doc.items: if item.get("reference_dt") and item.get("reference_dn"): if frappe.get_meta(item.reference_dt).has_field("invoiced"): set_invoiced(item, method, doc.name) if method=="on_submit" and 
frappe.db.get_value("Healthcare Settings", None, "create_test_on_si_submit") == '1': create_multiple("Sales Invoice", doc.name) def set_invoiced(item, method, ref_invoice=None): invoiced = False if(method=="on_submit"): validate_invoiced_on_submit(item) invoiced = True if item.reference_dt == 'Clinical Procedure': if get_healthcare_service_item('clinical_procedure_consumable_item') == item.item_code: frappe.db.set_value(item.reference_dt, item.reference_dn, "consumption_invoiced", invoiced) else: frappe.db.set_value(item.reference_dt, item.reference_dn, "invoiced", invoiced) else: frappe.db.set_value(item.reference_dt, item.reference_dn, "invoiced", invoiced) if item.reference_dt == 'Patient Appointment': if frappe.db.get_value('Patient Appointment', item.reference_dn, 'procedure_template'): dt_from_appointment = "Clinical Procedure" else: manage_fee_validity(item.reference_dn, method, ref_invoice) dt_from_appointment = "Patient Encounter" manage_doc_for_appoitnment(dt_from_appointment, item.reference_dn, invoiced) elif item.reference_dt == 'Lab Prescription': manage_prescriptions(invoiced, item.reference_dt, item.reference_dn, "Lab Test", "lab_test_created") elif item.reference_dt == 'Procedure Prescription': manage_prescriptions(invoiced, item.reference_dt, item.reference_dn, "Clinical Procedure", "procedure_created") def validate_invoiced_on_submit(item): if item.reference_dt == 'Clinical Procedure' and get_healthcare_service_item('clinical_procedure_consumable_item') == item.item_code: is_invoiced = frappe.db.get_value(item.reference_dt, item.reference_dn, "consumption_invoiced") else: is_invoiced = frappe.db.get_value(item.reference_dt, item.reference_dn, "invoiced") if is_invoiced == 1: frappe.throw(_("The item referenced by {0} - {1} is already invoiced"\ ).format(item.reference_dt, item.reference_dn)) def manage_prescriptions(invoiced, ref_dt, ref_dn, dt, created_check_field): created = frappe.db.get_value(ref_dt, ref_dn, created_check_field) if created == 1: # Fetch the doc created for the prescription doc_created = frappe.db.get_value(dt, {'prescription': ref_dn}) frappe.db.set_value(dt, doc_created, 'invoiced', invoiced) def validity_exists(practitioner, patient): return frappe.db.exists({ "doctype": "Fee Validity", "practitioner": practitioner, "patient": patient}) def manage_fee_validity(appointment_name, method, ref_invoice=None): appointment_doc = frappe.get_doc("Patient Appointment", appointment_name) validity_exist = validity_exists(appointment_doc.practitioner, appointment_doc.patient) do_not_update = False visited = 0 if validity_exist: fee_validity = frappe.get_doc("Fee Validity", validity_exist[0][0]) # Check if the validity is valid if (fee_validity.valid_till >= appointment_doc.appointment_date): if (method == "on_cancel" and appointment_doc.status != "Closed"): if ref_invoice == fee_validity.ref_invoice: visited = fee_validity.visited - 1 if visited < 0: visited = 0 frappe.db.set_value("Fee Validity", fee_validity.name, "visited", visited) do_not_update = True elif (method == "on_submit" and fee_validity.visited < fee_validity.max_visit): visited = fee_validity.visited + 1 frappe.db.set_value("Fee Validity", fee_validity.name, "visited", visited) do_not_update = True else: do_not_update = False if not do_not_update: fee_validity = update_fee_validity(fee_validity, appointment_doc.appointment_date, ref_invoice) visited = fee_validity.visited else: fee_validity = create_fee_validity(appointment_doc.practitioner, appointment_doc.patient, 
appointment_doc.appointment_date, ref_invoice) visited = fee_validity.visited # Mark All Patient Appointment invoiced = True in the validity range do not cross the max visit if (method == "on_cancel"): invoiced = True else: invoiced = False patient_appointments = appointments_valid_in_fee_validity(appointment_doc, invoiced) if patient_appointments and fee_validity: visit = visited for appointment in patient_appointments: if (method == "on_cancel" and appointment.status != "Closed"): if ref_invoice == fee_validity.ref_invoice: visited = visited - 1 if visited < 0: visited = 0 frappe.db.set_value("Fee Validity", fee_validity.name, "visited", visited) frappe.db.set_value("Patient Appointment", appointment.name, "invoiced", False) manage_doc_for_appoitnment("Patient Encounter", appointment.name, False) elif method == "on_submit" and int(fee_validity.max_visit) > visit: if ref_invoice == fee_validity.ref_invoice: visited = visited + 1 frappe.db.set_value("Fee Validity", fee_validity.name, "visited", visited) frappe.db.set_value("Patient Appointment", appointment.name, "invoiced", True) manage_doc_for_appoitnment("Patient Encounter", appointment.name, True) if ref_invoice == fee_validity.ref_invoice: visit = visit + 1 if method == "on_cancel": ref_invoice_in_fee_validity = frappe.db.get_value("Fee Validity", fee_validity.name, 'ref_invoice') if ref_invoice_in_fee_validity == ref_invoice: frappe.delete_doc("Fee Validity", fee_validity.name) def appointments_valid_in_fee_validity(appointment, invoiced): valid_days = frappe.db.get_value("Healthcare Settings", None, "valid_days") max_visit = frappe.db.get_value("Healthcare Settings", None, "max_visit") if int(max_visit) < 1: max_visit = 1 valid_days_date = add_days(getdate(appointment.appointment_date), int(valid_days)) return frappe.get_list("Patient Appointment",{'patient': appointment.patient, 'invoiced': invoiced, 'appointment_date':("<=", valid_days_date), 'appointment_date':(">=", getdate(appointment.appointment_date)), 'practitioner': appointment.practitioner}, order_by="appointment_date", limit=int(max_visit)-1) def manage_doc_for_appoitnment(dt_from_appointment, appointment, invoiced): dn_from_appointment = frappe.db.exists( dt_from_appointment, { "appointment": appointment } ) if dn_from_appointment: frappe.db.set_value(dt_from_appointment, dn_from_appointment, "invoiced", invoiced) @frappe.whitelist() def get_drugs_to_invoice(encounter): encounter = frappe.get_doc("Patient Encounter", encounter) if encounter: patient = frappe.get_doc("Patient", encounter.patient) if patient and patient.customer: item_to_invoice = [] for drug_line in encounter.drug_prescription: if drug_line.drug_code: qty = 1 if frappe.db.get_value("Item", drug_line.drug_code, "stock_uom") == "Nos": qty = drug_line.get_quantity() description = False if drug_line.dosage: description = drug_line.dosage if description and drug_line.period: description += " for "+drug_line.period if not description: description = "" item_to_invoice.append({'drug_code': drug_line.drug_code, 'quantity': qty, 'description': description}) return item_to_invoice @frappe.whitelist() def get_children(doctype, parent, company, is_root=False): parent_fieldname = 'parent_' + doctype.lower().replace(' ', '_') fields = [ 'name as value', 'is_group as expandable', 'lft', 'rgt' ] # fields = [ 'name', 'is_group', 'lft', 'rgt' ] filters = [['ifnull(`{0}`,"")'.format(parent_fieldname), '=', '' if is_root else parent]] if is_root: fields += ['service_unit_type'] if doctype == 'Healthcare Service Unit' else [] 
filters.append(['company', '=', company]) else: fields += ['service_unit_type', 'allow_appointments', 'inpatient_occupancy', 'occupancy_status'] if doctype == 'Healthcare Service Unit' else [] fields += [parent_fieldname + ' as parent'] hc_service_units = frappe.get_list(doctype, fields=fields, filters=filters) if doctype == 'Healthcare Service Unit': for each in hc_service_units: occupancy_msg = "" if each['expandable'] == 1: occupied = False vacant = False child_list = frappe.db.sql(""" select name, occupancy_status from `tabHealthcare Service Unit` where inpatient_occupancy = 1 and lft > %s and rgt < %s""", (each['lft'], each['rgt'])) for child in child_list: if not occupied: occupied = 0 if child[1] == "Occupied": occupied += 1 if not vacant: vacant = 0 if child[1] == "Vacant": vacant += 1 if vacant and occupied: occupancy_total = vacant+occupied occupancy_msg = str(occupied) + " Occupied out of " + str(occupancy_total) each["occupied_out_of_vacant"] = occupancy_msg return hc_service_units
brownharryb/erpnext
erpnext/healthcare/utils.py
Python
gpl-3.0
20,066
[ "VisIt" ]
3e28c71f96aa203e5ce2b866efce28f0bdc545856ff296da86b9864bcd60a6ec
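A side note on the ERPNext healthcare record above: in appointments_valid_in_fee_validity the filters dictionary repeats the key 'appointment_date', so by ordinary Python dict semantics the "<=" upper bound is silently discarded and only the ">=" bound reaches frappe.get_list. The sketch below is not the repository's code; it shows how both date bounds can be kept using Frappe's list-style filters, mirroring the field names and the limit argument used in the record (a minimal sketch, assuming a standard Frappe environment).

import frappe
from frappe.utils import add_days, getdate

def appointments_in_validity_window(appointment, invoiced, valid_days, max_visit):
    # Hypothetical helper, not part of the record above: list-style filters so
    # that both appointment_date conditions survive (dict filters would drop one).
    start = getdate(appointment.appointment_date)
    end = add_days(start, int(valid_days))
    return frappe.get_list(
        "Patient Appointment",
        filters=[
            ["patient", "=", appointment.patient],
            ["practitioner", "=", appointment.practitioner],
            ["invoiced", "=", invoiced],
            ["appointment_date", ">=", start],
            ["appointment_date", "<=", end],   # upper bound is no longer lost
        ],
        order_by="appointment_date",
        limit=int(max_visit) - 1,
    )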
#!/usr/bin/env python # D. Jones - 2/13/14 """This code is from the IDL Astronomy Users Library""" import numpy as np def mmm( sky_vector, highbad = False, debug = False, readnoise = False, nsky = False, integer = "discrete", mxiter = 50, minsky = 20, nan=True): """Estimate the sky background in a stellar contaminated field. MMM assumes that contaminated sky pixel values overwhelmingly display POSITIVE departures from the true value. Adapted from DAOPHOT routine of the same name. CALLING SEQUENCE: skymod,sigma,skew = mmm.mmm( sky, highbad= , readnoise=, debug=, minsky=, nsky=, integer=) INPUTS: sky - Array or Vector containing sky values. This version of MMM does not require SKY to be sorted beforehand. RETURNS: skymod - Scalar giving estimated mode of the sky values sigma - Scalar giving standard deviation of the peak in the sky histogram. If for some reason it is impossible to derive skymod, then SIGMA = -1.0 skew - Scalar giving skewness of the peak in the sky histogram If no output variables are supplied or if "debug" is set then the values of skymod, sigma and skew will be printed. OPTIONAL KEYWORD INPUTS: highbad - scalar value of the (lowest) "bad" pixel level (e.g. cosmic rays or saturated pixels) If not supplied, then there is assumed to be no high bad pixels. minsky - Integer giving mininum number of sky values to be used. MMM will return an error if fewer sky elements are supplied. Default = 20. maxiter - integer giving maximum number of iterations allowed,default=50 readnoise - Scalar giving the read noise (or minimum noise for any pixel). Normally, MMM determines the (robust) median by averaging the central 20% of the sky values. In some cases where the noise is low, and pixel values are quantized a larger fraction may be needed. By supplying the optional read noise parameter, MMM is better able to adjust the fraction of pixels used to determine the median. integer - Set this keyword if the input SKY vector only contains discrete integer values. This keyword is only needed if the SKY vector is of type float or double precision, but contains only discrete integer values. (Prior to July 2004, the equivalent of /INTEGER was set for all data types) debug - If this keyword is set and non-zero, then additional information is displayed at the terminal. OPTIONAL OUTPUT KEYWORD: nsky - Integer scalar giving the number of pixels actually used for the sky computation (after outliers have been removed). NOTES: (1) Program assumes that low "bad" pixels (e.g. bad CCD columns) have already been deleted from the SKY vector. (2) MMM was updated in June 2004 to better match more recent versions of DAOPHOT. (3) Does not work well in the limit of low Poisson integer counts (4) MMM may fail for strongly skewed distributions. METHOD: The algorithm used by MMM consists of roughly two parts: (1) The average and sigma of the sky pixels is computed. These values are used to eliminate outliers, i.e. values with a low probability given a Gaussian with specified average and sigma. The average and sigma are then recomputed and the process repeated up to 20 iterations. (2) The amount of contamination by stars is estimated by comparing the mean and median of the remaining sky pixels. If the mean is larger than the median then the true sky value is estimated by 3*median - 2*mean REVISION HISTORY: Adapted to IDL from 1986 version of DAOPHOT in STSDAS W. Landsman, STX Feb, 1987 Added HIGHBAD keyword W. Landsman January, 1991 Fixed occasional problem with integer inputs W. 
Landsman Feb, 1994 Avoid possible 16 bit integer overflow W. Landsman November, 2001 Added READNOISE, NSKY keywords, new median computation W. Landsman June, 2004 Added INTEGER keyword W. Landsman July, 2004 Improve numerical precision W. Landsman October, 2004 Fewer aborts on strange input sky histograms W. Landsman October, 2005 Added /SILENT keyword November, 2005 Fix too many /CON keywords to MESSAGE W.L. December, 2005 Fix bug introduced June 2004 removing outliers N. Cunningham/W. Landsman January, 2006 when READNOISE not set Make sure that MESSAGE never aborts W. Landsman January, 2008 Add mxiter keyword and change default to 50 W. Landsman August, 2011 Added MINSKY keyword W.L. December, 2011 Converted to Python D. Jones January, 2014 """ if nan: sky_vector = sky_vector[np.where(sky_vector == sky_vector)] nsky = len( sky_vector ) #Get number of sky elements if nsky < minsky: sigma=-1.0 ; skew = 0.0; skymod = np.nan print(('ERROR -Input vector must contain at least '+str(minsky)+' elements')) return(skymod,sigma,skew) nlast = nsky-1 #Subscript of last pixel in SKY array if debug: print(('Processing '+str(nsky) + ' element array')) sz_sky = np.shape(sky_vector) sky = np.sort(sky_vector) #Sort SKY in ascending values skymid = 0.5*sky[int((nsky-1)/2)] + 0.5*sky[int(nsky/2)] #Median value of all sky values cut1 = np.min( [skymid-sky[0],sky[nsky-1] - skymid] ) if highbad: cut1[np.where(cut1 > highbad - skymid)[0]] = highbad - skymid cut2 = skymid + cut1 cut1 = skymid - cut1 # Select the pixels between Cut1 and Cut2 good = np.where( (sky <= cut2) & (sky >= cut1))[0] Ngood = len(good) if ( Ngood == 0 ): sigma=-1.0 ; skew = 0.0; skymod = 0.0 print(('ERROR - No sky values fall within ' + str(cut1) + \ ' and ' + str(cut2))) return(skymod,sigma,skew) delta = sky[good] - skymid #Subtract median to improve arithmetic accuracy sum = np.sum(delta.astype('float64')) sumsq = np.sum(delta.astype('float64')**2) maximm = np.max( good) ; minimm = np.min(good) # Highest value accepted at upper end of vector minimm = minimm -1 #Highest value reject at lower end of vector # Compute mean and sigma (from the first pass). medianIndex = np.int(np.floor((minimm+maximm+1)/2)) skymed = 0.5*sky[medianIndex] + \ 0.5*sky[medianIndex + 1] #median skymn = sum/(maximm-minimm) #mean sigma = np.sqrt(sumsq/(maximm-minimm)-skymn**2) #sigma skymn = skymn + skymid #Add median which was subtracted off earlier # If mean is less than the mode, then the contamination is slight, and the # mean value is what we really want. # skymod = (skymed < skymn) ? 3.*skymed - 2.*skymn : skymn if skymed < skymn: skymod = 3.*skymed - 2.*skymn else: skymod = skymn # Rejection and recomputation loop: niter = 0 clamp = 1 old = 0 # START_LOOP: redo = True while redo: niter = niter + 1 if ( niter > mxiter ): sigma=-1.0 ; skew = 0.0 print(('ERROR - Too many ('+str(mxiter) + ') iterations,' + \ ' unable to compute sky')) # import pdb; pdb.set_trace() return(skymod,sigma,skew) if ( maximm-minimm < minsky ): #Error? sigma = -1.0 ; skew = 0.0 print(('ERROR - Too few ('+str(maximm-minimm) + \ ') valid sky elements, unable to compute sky')) return(skymod,sigma,skew) # Compute Chauvenet rejection criterion. r = np.log10( float( maximm-minimm ) ) r = np.max( [ 2., ( -0.1042*r + 1.1695)*r + 0.8895 ] ) # Compute rejection limits (symmetric about the current mode). 
cut = r*sigma + 0.5*np.abs(skymn-skymod) # if integer: cut = cut > 1.5 cut1 = skymod - cut ; cut2 = skymod + cut # # Recompute mean and sigma by adding and/or subtracting sky values # at both ends of the interval of acceptable values. redo = False newmin = minimm if sky[newmin+1] >= cut1: tst_min = 1 #Is minimm+1 above current CUT? else: tst_min = 0 if (newmin == -1) and tst_min: done = 1 #Are we at first pixel of SKY? else: done = 0 if not done: if newmin > 0: skyind = newmin else: skyind = 0 if (sky[skyind] < cut1) and tst_min: done = 1 if not done: istep = 1 - 2*int(tst_min) while not done: newmin = newmin + istep if (newmin == -1) | (newmin == nlast): done = 1 if not done: if (sky[newmin] <= cut1) and (sky[newmin+1] >= cut1): done = 1 if tst_min: delta = sky[newmin+1:minimm+1] - skymid else: delta = sky[minimm+1:newmin+1] - skymid sum = sum - istep*np.sum(delta) sumsq = sumsq - istep*np.sum(delta**2) redo = True minimm = newmin newmax = maximm if sky[maximm] <= cut2: tst_max = 1 #Is current maximum below upper cut? else: tst_max = 0 if (maximm == nlast) and tst_max: done = 1 else: done = 0 #Are we at last pixel of SKY array? if not done: if maximm+1 < nlast: skyind = maximm+1 else: skyind = nlast if ( tst_max ) and (sky[skyind] > cut2): done = 1 if not done: # keep incrementing newmax istep = -1 + 2*int(tst_max) #Increment up or down? while not done: newmax = newmax + istep if (newmax == nlast) or (newmax == -1): done = 1 if not done: if ( sky[newmax] <= cut2 ) and ( sky[newmax+1] >= cut2 ): done = 1 if tst_max: delta = sky[maximm+1:newmax+1] - skymid else: delta = sky[newmax+1:maximm+1] - skymid sum = sum + istep*np.sum(delta) sumsq = sumsq + istep*np.sum(delta**2) redo = True maximm = newmax # # Compute mean and sigma (from this pass). # nsky = maximm - minimm if ( nsky < minsky ): # error? sigma = -1.0 ; skew = 0.0 print('ERROR - Outlier rejection left too few sky elements') return(skymod,sigma,skew) skymn = sum/nsky var = sumsq/nsky - skymn**2 if var < 0: var = 0 sigma = float( np.sqrt( var )) skymn = skymn + skymid # Determine a more robust median by averaging the central 20% of pixels. # Estimate the median using the mean of the central 20 percent of sky # values. Be careful to include a perfectly symmetric sample of pixels about # the median, whether the total number is even or odd within the acceptance # interval center = (minimm + 1 + maximm)/2. side = np.round(0.2*(maximm-minimm))/2. + 0.25 j = np.int(np.round(center-side)) k = np.int(np.round(center+side)) # In case the data has a large number of of the same (quantized) # intensity, expand the range until both limiting values differ from the # central value by at least 0.25 times the read noise. if readnoise: L = round(center-0.25) M = round(center+0.25) R = 0.25*readnoise while ((j > 0) and (k < nsky-1) and \ ( ((sky[L] - sky[j]) < R) or ((sky[k] - sky[M]) < R))): j -= 1 k += 1 skymed = np.sum(sky[j:k+1])/(k-j+1) # If the mean is less than the median, then the problem of contamination # is slight, and the mean is what we really want. 
if skymed < skymn : dmod = 3.*skymed-2.*skymn-skymod else: dmod = skymn - skymod # prevent oscillations by clamping down if sky adjustments are changing sign if dmod*old < 0: clamp = 0.5*clamp skymod = skymod + clamp*dmod old = dmod # if redo then goto, START_LOOP # skew = float( (skymn-skymod)/max([1.,sigma]) ) nsky = maximm - minimm if debug: print(('% MMM: Number of unrejected sky elements: ', str(nsky,2), \ ' Number of iterations: ', str(niter))) print(('% MMM: Mode, Sigma, Skew of sky vector:', skymod, sigma, skew )) return(skymod,sigma,skew)
davidharvey1986/pyRRG
src/mmm.py
Python
mit
14,106
[ "Gaussian" ]
cb02c2a43749ae67f355911f84f43753f669a96b4fed09c007d3f2eb1bc41760
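The MMM routine above assumes contaminated sky pixels depart positively from the true background, so a quick synthetic check is to inject bright "stars" into a Gaussian sky and confirm the returned mode stays near the true level. The import path and noise levels below are assumptions, and note that the routine's np.int calls need an older NumPy (or a one-line patch) on recent versions.

# Minimal usage sketch for the mmm() estimator in the record above.
import numpy as np
from mmm import mmm   # assumed module path

rng = np.random.default_rng(0)
sky = rng.normal(loc=1000.0, scale=15.0, size=5000)        # true sky ~ N(1000, 15)
stars = rng.choice(sky.size, size=250, replace=False)       # ~5% contaminated pixels
sky[stars] += rng.uniform(200.0, 2000.0, size=stars.size)   # positive departures only

skymod, sigma, skew = mmm(sky)
print(skymod, sigma, skew)   # mode should land near 1000 despite the contamination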
import numpy as np from math import sqrt, exp def tri2full(H_nn, UL='L'): """Fill in values of hermitian matrix. Fill values in lower or upper triangle of H_nn based on the opposite triangle, such that the resulting matrix is symmetric/hermitian. UL='U' will copy (conjugated) values from upper triangle into the lower triangle. UL='L' will copy (conjugated) values from lower triangle into the upper triangle. """ N, tmp = H_nn.shape assert N == tmp, 'Matrix must be square' #assert np.isreal(H_nn.diagonal()).all(), 'Diagonal should be real' if UL != 'L': H_nn = H_nn.T for n in range(N - 1): H_nn[n, n + 1:] = H_nn[n + 1:, n].conj() def dagger(matrix): return np.conj(matrix.T) def rotate_matrix(h, u): return np.dot(u.T.conj(), np.dot(h, u)) def get_subspace(matrix, index): """Get the subspace spanned by the basis function listed in index""" assert matrix.ndim == 2 and matrix.shape[0] == matrix.shape[1] return matrix.take(index, 0).take(index, 1) permute_matrix = get_subspace def normalize(matrix, S=None): """Normalize column vectors. :: <matrix[:,i]| S |matrix[:,i]> = 1 """ for col in matrix.T: if S is None: col /= np.linalg.norm(col) else: col /= np.sqrt(np.dot(col.conj(), np.dot(S, col))) def subdiagonalize(h_ii, s_ii, index_j): nb = h_ii.shape[0] nb_sub = len(index_j) h_sub_jj = get_subspace(h_ii, index_j) s_sub_jj = get_subspace(s_ii, index_j) e_j, v_jj = np.linalg.eig(np.linalg.solve(s_sub_jj, h_sub_jj)) normalize(v_jj, s_sub_jj) # normalize: <v_j|s|v_j> = 1 permute_list = np.argsort(e_j.real) e_j = np.take(e_j, permute_list) v_jj = np.take(v_jj, permute_list, axis=1) #setup transformation matrix c_ii = np.identity(nb, complex) for i in xrange(nb_sub): for j in xrange(nb_sub): c_ii[index_j[i], index_j[j]] = v_jj[i, j] h1_ii = rotate_matrix(h_ii, c_ii) s1_ii = rotate_matrix(s_ii, c_ii) return h1_ii, s1_ii, c_ii, e_j def cutcoupling(h, s, index_n): for i in index_n: s[:, i] = 0.0 s[i, :] = 0.0 s[i, i] = 1.0 Ei = h[i, i] h[:, i] = 0.0 h[i, :] = 0.0 h[i, i] = Ei def fermidistribution(energy, kt): #fermi level is fixed to zero return 1.0 / (1.0 + np.exp(energy / kt) ) def fliplr(a): length=len(a) b = [0] * length for i in range(length): b[i] = a[length - i - 1] return b def plot_path(energy): import pylab pylab.plot(np.real(energy), np.imag(energy), 'b--o') pylab.show() def function_integral(function, calcutype): #return the integral of the 'function' on 'intrange' #the function can be a value or a matrix, arg1,arg2 are the possible #parameters of the function intctrl = function.intctrl if calcutype == 'eqInt': intrange = intctrl.eqintpath tol = intctrl.eqinttol if hasattr(function.intctrl, 'eqpath_radius'): radius = function.intctrl.eqpath_radius else: radius = -1 if hasattr(function.intctrl, 'eqpath_origin'): origin = function.intctrl.eqpath_origin else: origin = 1000 elif calcutype == 'neInt': intrange = intctrl.neintpath tol = intctrl.neinttol radius = -1 origin = 1000 elif calcutype == 'locInt': intrange = intctrl.locintpath tol = intctrl.locinttol if hasattr(function.intctrl, 'locpath_radius'): radius = function.intctrl.locpath_radius else: radius = -1 if hasattr(function.intctrl, 'locpath_origin'): origin = function.intctrl.locpath_origin else: origin = 1000 trace = 0 a = 0. b = 1. #Initialize with 13 function evaluations. 
c = (a + b) / 2 h = (b - a) / 2 realmin = 2e-17 s = [.942882415695480, sqrt(2.0/3), .641853342345781, 1/sqrt(5.0), .236383199662150] s1 = [0] * len(s) s2 = [0] * len(s) for i in range(len(s)): s1[i] = c - s[i] * h s2[i] = c + fliplr(s)[i] * h x0 = [a] + s1 + [c] + s2 + [b] s0 = [.0158271919734802, .094273840218850, .155071987336585, .188821573960182, .199773405226859, .224926465333340] w0 = s0 + [.242611071901408] + fliplr(s0) w1 = [1, 0, 0, 0, 5, 0, 0, 0, 5, 0, 0, 0, 1] w2 = [77, 0, 432, 0, 625, 0, 672, 0, 625, 0, 432, 0, 77] for i in range(len(w1)): w1[i] = w1[i] / 6.0 w2[i] = w2[i] / 1470.0 dZ = [intrange[:len(intrange) - 1], intrange[1:]] hmin = [0] * len(dZ[1]) path_type = [] for i in range(len(intrange) - 1): rs = np.abs(dZ[0][i] - origin) re = np.abs(dZ[1][i] - origin) if abs(rs - radius) < 1.0e-8 and abs(re - radius) < 1.0e-8: path_type.append('half_circle') else: path_type.append('line') for i in range(len(dZ[1])): if path_type[i] == 'half_circle': dZ[0][i] = 0 dZ[1][i] = np.pi for i in range(len(dZ[1])): dZ[1][i] = dZ[1][i] - dZ[0][i] hmin[i] = realmin / 1024 * abs(dZ[1][i]) temp = np.array([[1] * 13, x0]).transpose() Zx = np.dot(temp, np.array(dZ)) Zxx = [] for i in range(len(intrange) - 1): for j in range(13): Zxx.append(Zx[j][i]) ns = 0 ne = 12 if path_type[0] == 'line': yns = function.calgfunc(Zxx[ns], calcutype) elif path_type[0] == 'half_circle': energy = origin + radius * np.exp((np.pi - Zxx[ns + i]) * 1.j) yns = -1.j * radius * np.exp(-1.j* Zxx[ns +i])* function.calgfunc(energy, calcutype) fcnt = 0 for n in range(len(intrange)-1): # below evaluate the integral and adjust the tolerance Q1pQ0 = yns * (w1[0] - w0[0]) Q2pQ0 = yns * (w2[0] - w0[0]) fcnt = fcnt + 12 for i in range(1,12): if path_type[n] == 'line': yne = function.calgfunc(Zxx[ns + i], calcutype) elif path_type[n] == 'half_circle': energy = origin + radius * np.exp((np.pi -Zxx[ns + i]) * 1.j) yne = -1.j * radius * np.exp(-1.j * Zxx[ns + i])* function.calgfunc(energy, calcutype) Q1pQ0 += yne * (w1[i] - w0[i]) Q2pQ0 += yne * (w2[i] - w0[i]) # Increase the tolerance if refinement appears to be effective r = np.abs(Q2pQ0) / (np.abs(Q1pQ0) + np.abs(realmin)) dim = np.product(r.shape) r = np.sum(r) / dim if r > 0 and r < 1: thistol = tol / r else: thistol = tol if path_type[n] == 'line': yne = function.calgfunc(Zxx[ne], calcutype) elif path_type[n] == 'half_circle': energy = origin + radius * np.exp((np.pi -Zxx[ne]) * 1.j) yne = -1.j * radius * np.exp(-1.j * Zxx[ne])* function.calgfunc(energy, calcutype) #Call the recursive core integrator Qk, xpk, wpk, fcnt, warn = quadlstep(function, Zxx[ns], Zxx[ne], yns, yne, thistol, trace, fcnt, hmin[n], calcutype, path_type[n], origin, radius) if n == 0: Q = np.copy(Qk) Xp = xpk[:] Wp = wpk[:] else: Q += Qk Xp = Xp[:-1] + xpk Wp = Wp[:-1] + [Wp[-1] + wpk[0]] + wpk[1:] if warn == 1: print 'warning: Minimum step size reached,singularity possible' elif warn == 2: print 'warning: Maximum function count excced; singularity likely' elif warn == 3: print 'warning: Infinite or Not-a-Number function value encountered' else: pass ns += 13 ne += 13 yns = np.copy(yne) return Q,Xp,Wp,fcnt def quadlstep(f, Za, Zb, fa, fb, tol, trace, fcnt, hmin, calcutype, path_type, origin, radius): #Gaussian-Lobatto and Kronrod method #QUADLSTEP Recursive core routine for integral #input parameters: # f ---------- function, here we just use the module calgfunc # to return the value, if wanna use it for # another one, change it # Za, Zb ---------- the start and end point of the integral # fa, fb ---------- 
the function value on Za and Zb # fcnt ---------- the number of the funtion recalled till now #output parameters: # Q ---------- integral # Xp ---------- selected points # Wp ---------- weight # fcnt ---------- the number of the function recalled till now maxfcnt = 10000 # Evaluate integrand five times in interior of subintrval [a,b] Zh = (Zb - Za) / 2.0 if abs(Zh) < hmin: # Minimun step size reached; singularity possible Q = Zh * (fa + fb) if path_type == 'line': Xp = [Za, Zb] elif path_type == 'half_circle': Xp = [origin + radius * np.exp((np.pi - Za) * 1.j), origin + radius * np.exp((np.pi - Zb) * 1.j)] Wp = [Zh, Zh] warn = 1 return Q, Xp, Wp, fcnt, warn fcnt += 5 if fcnt > maxfcnt: #Maximum function count exceed; singularity likely Q = Zh * (fa + fb) if path_type == 'line': Xp = [Za, Zb] elif path_type == 'half_circle': Xp = [origin + radius * np.exp((np.pi - Za) * 1.j), origin + radius * np.exp((np.pi - Zb) * 1.j)] Wp = [Zh, Zh] warn = 2 return Q, Xp, Wp, fcnt, warn x = [0.18350341907227, 0.55278640450004, 1.0, 1.44721359549996, 1.81649658092773]; Zx = [0] * len(x) y = [0] * len(x) for i in range(len(x)): x[i] *= 0.5 Zx[i] = Za + (Zb - Za) * x[i] if path_type == 'line': y[i] = f.calgfunc(Zx[i], calcutype) elif path_type == 'half_circle': energy = origin + radius * np.exp((np.pi - Zx[i]) * 1.j) y[i] = f.calgfunc(energy, calcutype) #Four point Lobatto quadrature s1 = [1.0, 0.0, 5.0, 0.0, 5.0, 0.0, 1.0] s2 = [77.0, 432.0, 625.0, 672.0, 625.0, 432.0, 77.0] Wk = [0] * 7 Wp = [0] * 7 for i in range(7): Wk[i] = (Zh / 6.0) * s1[i] Wp[i] = (Zh / 1470.0) * s2[i] if path_type == 'line': Xp = [Za] + Zx + [Zb] elif path_type == 'half_circle': Xp = [Za] + Zx + [Zb] for i in range(7): factor = -1.j * radius * np.exp(1.j * (np.pi - Xp[i])) Wk[i] *= factor Wp[i] *= factor Xp[i] = origin + radius * np.exp((np.pi - Xp[i]) * 1.j) Qk = fa * Wk[0] + fb * Wk[6] Q = fa * Wp[0] + fb * Wp[6] for i in range(1, 6): Qk += y[i-1] * Wk[i] Q += y[i-1] * Wp[i] if np.isinf(np.max(np.abs(Q))): Q = Zh * (fa + fb) if path_type == 'line': Xp = [Za, Zb] elif path_type == 'half_circle': Xp = [origin + radius * np.exp((np.pi - Za) * 1.j), origin + radius * np.exp((np.pi - Zb) * 1.j)] Wp = [Zh, Zh] warn = 3 return Qk, Xp, Wp, fcnt, warn else: pass if trace: print fcnt, real(Za), imag(Za), abs(Zh) #Check accurancy of integral over this subinterval XXk = [Xp[0], Xp[2], Xp[4], Xp[6]] WWk = [Wk[0], Wk[2], Wk[4], Wk[6]] YYk = [fa, y[1], y[3], fb] if np.max(np.abs(Qk - Q)) <= tol: warn = 0 return Q, XXk, WWk, fcnt, warn #Subdivide into six subintevals else: Q, Xk, Wk, fcnt, warn = quadlstep(f, Za, Zx[1], fa, YYk[1], tol, trace, fcnt, hmin, calcutype, path_type, origin, radius) Qk, xkk, wkk, fcnt, warnk = quadlstep(f, Zx[1], Zx[3], YYk[1], YYk[2], tol, trace, fcnt, hmin, calcutype, path_type, origin, radius) Q += Qk Xk = Xk[:-1] + xkk Wk = Wk[:-1] + [Wk[-1] + wkk[0]] + wkk[1:] warn = max(warn, warnk) Qk, xkk, wkk, fcnt, warnk = quadlstep(f, Zx[3], Zb, YYk[2], fb, tol, trace, fcnt, hmin, calcutype, path_type, origin, radius) Q += Qk Xk = Xk[:-1] + xkk Wk = Wk[:-1] + [Wk[-1] + wkk[0]] + wkk[1:] warn = max(warn, warnk) return Q, Xk, Wk, fcnt, warn def mytextread0(filename): num = 0 df = file(filename) df.seek(0) for line in df: if num == 0: dim = line.strip().split(' ') row = int(dim[0]) col = int(dim[1]) mat = np.empty([row, col]) else: data = line.strip().split(' ') if len(data) == 0 or len(data)== 1: break else: for i in range(len(data)): mat[num - 1, i] = float(data[i]) num += 1 return mat def mytextread1(filename): num = 0 df = 
file(filename) df.seek(0) data = [] for line in df: tmp = line.strip() if len(tmp) != 0: data.append(float(tmp)) else: break dim = int(sqrt(len(data))) mat = np.empty([dim, dim]) for i in range(dim): for j in range(dim): mat[i, j] = data[num] num += 1 return mat def mytextwrite1(filename, mat): num = 0 df = open(filename,'w') df.seek(0) dim = mat.shape[0] if dim != mat.shape[1]: print 'matwirte, matrix is not square' for i in range(dim): for j in range(dim): df.write('%20.20e\n'% mat[i, j]) df.close()
JConwayAWT/PGSS14CC
lib/python/multimetallics/ase/transport/tools.py
Python
gpl-2.0
14,415
[ "Gaussian" ]
053f578c1a6c61f376ffe6f585a003173e0cd0fcd9bce2ef822214b9677a6438
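The tri2full helper in the transport-tools record above completes a Hermitian matrix in place from one triangle. Because that module is Python 2 (print statements, xrange, file()), the check below is a standalone NumPy sketch of the same idea rather than an import of the module itself.

# Standalone sketch of the tri2full idea (Python 3 / NumPy only).
import numpy as np

def tri2full_lower(H):
    """Copy conjugated lower-triangle values into the upper triangle, in place."""
    n = H.shape[0]
    for i in range(n - 1):
        H[i, i + 1:] = H[i + 1:, i].conj()

H = np.array([[1.0, 0.0],
              [2.0 + 1.0j, 3.0]], dtype=complex)
tri2full_lower(H)
assert np.allclose(H, H.conj().T)   # matrix is now Hermitian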
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # prep cmip5 data downloaded using SYNDA for a re-do of the Relative Humidity with the CORRECT variable # author: Michael Lindgren -- Sept. 2018 # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # import pandas as pd import os class Files( object ): def __init__( self, base_dir, *args, **kwargs ): ''' list the files from the nested directory structure generated by SYNDA application and access of the ESGF CMIP5 data holdings ''' self.base_dir = base_dir self.files = self.list_files( ) self.df = self._to_dataframe( ) def list_files( self ): return [ os.path.join( root, fn ) for root, subs, files in os.walk( self.base_dir ) \ if len( files ) > 0 for fn in files if fn.endswith( '.nc' ) ] @staticmethod def _split_fn( fn ): return os.path.basename( fn ).split( '.' )[0].split( '_' ) @staticmethod def f( x ): ''' take the files dataframe and split the years into begin year/month and end year/month and add new columns to a new dataframe ''' begin, end = x[ 'years' ].split( '-' ) x['begin_month'] = begin[4:] x['begin_year'] = begin[:4] x['end_month'] = end[4:] x['end_year'] = end[:4] return x def _to_dataframe( self ): import pandas as pd out = [] for fn in self.files: variable, cmor_table, model, scenario, experiment, years = self._split_fn( fn ) out.append( {'fn':fn, 'variable':variable, 'cmor_table':cmor_table, \ 'model':model, 'scenario':scenario, 'experiment':experiment, 'years':years } ) column_order = ['fn', 'variable', 'cmor_table', 'model', 'scenario', 'experiment', 'years'] return pd.DataFrame( out, columns=column_order ).apply( self.f, axis=1 ) if __name__ == '__main__': # PREP THE INPUT NETCDF FILES FROM AR5 import itertools, glob, os from downscale import preprocess # some setup args base_dir = '/workspace/Shared/Tech_Projects/DeltaDownscaling/project_data/cmip5/raw_download_rhs/data_raw_sorted' prepped_dir = '/workspace/Shared/Tech_Projects/DeltaDownscaling/project_data/cmip5/prepped' variables = [ 'hurs' ] scenarios = [ 'historical', 'rcp26', 'rcp45', 'rcp60', 'rcp85' ] models = [ 'IPSL-CM5A-LR', 'MRI-CGCM3', 'GISS-E2-R', 'GFDL-CM3', 'CCSM4' ] if not os.path.exists( prepped_dir ): os.makedirs( prepped_dir ) # lets get the darn data returned that we want: files_df = Files( base_dir )._to_dataframe( ) log = open( os.path.join( prepped_dir, 'log_file_prep.txt'), 'w' ) for variable, model, scenario in itertools.product( variables, models, scenarios ): print( 'prepping: {} {} {}'.format( variable, model, scenario ) ) # get the files we want to work with for this run cur_files = files_df[ (files_df.variable == variable) & (files_df.model == model) & (files_df.scenario == scenario) ]['fn'].tolist() if 'historical' in scenario: years = (1860,2005) else: years = (2006,2100) raw_path = os.path.dirname( cur_files[0] ) output_path = os.path.join( prepped_dir, model, scenario, variable ) if not os.path.exists( output_path ): os.makedirs( output_path ) experiment = 'r1i1p1' # try: pp = preprocess.Preprocess( raw_path, variable, model, scenario, experiment, years ) pp.write_nc( output_path, True ) # except: # print( 'ERROR!' ) # log.write( 'error : %s - %s - %s - %s - %s - %s \n\n' % (raw_path, variable, model, scenario, experiment, years) ) # pass # close the log file log.flush() log.close()
ua-snap/downscale
snap_scripts/downscaling_v2/prep_raw_cmip5_netcdf_hurs_tem.py
Python
mit
3,571
[ "NetCDF" ]
52915b092597f0e8612da8a5a9941a1389b41d281510fac38535ff6c9ae3d0f1
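The Files class in the CMIP5 prep record above leans on the CMOR filename convention variable_table_model_experiment_ensemble_period.nc, which _split_fn unpacks after stripping the extension. The filename below is hypothetical, but the parsing mirrors _split_fn and the begin/end year-month split applied in Files.f.

# Sketch of the filename convention the Files class relies on (hypothetical path).
import os

fn = "/data/raw/hurs_Amon_GFDL-CM3_rcp85_r1i1p1_200601-210012.nc"
variable, cmor_table, model, scenario, experiment, years = (
    os.path.basename(fn).split(".")[0].split("_")
)
begin, end = years.split("-")
row = {
    "variable": variable, "cmor_table": cmor_table, "model": model,
    "scenario": scenario, "experiment": experiment,
    "begin_year": begin[:4], "begin_month": begin[4:],
    "end_year": end[:4], "end_month": end[4:],
}
print(row)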
# -*- coding: utf-8 -*- # Copyright (C) 2017 by Pedro Mendes, Virginia Tech Intellectual # Properties, Inc., University of Heidelberg, and University of # of Connecticut School of Medicine. # All rights reserved. # Copyright (C) 2010 - 2016 by Pedro Mendes, Virginia Tech Intellectual # Properties, Inc., University of Heidelberg, and The University # of Manchester. # All rights reserved. # Copyright (C) 2009 by Pedro Mendes, Virginia Tech Intellectual # Properties, Inc., EML Research, gGmbH, University of Heidelberg, # and The University of Manchester. # All rights reserved. # # This is an example on how to run an parameter fitting task. # The example creates a simple model and runs a time course simulation on it. # The timecourse data is written to file with some noise added to it. # This data is used to fit the original parameters. # from COPASI import * import sys from random import random MODEL_STRING="""<?xml version=\"1.0\" encoding=\"UTF-8\"?> <!-- Created by COPASI version 4.5.30 (Debug) on 2009-03-30 08:01 with libSBML version 3.3.2. --> <sbml xmlns=\"http://www.sbml.org/sbml/level2\" level=\"2\" version=\"1\"> <model metaid=\"COPASI1\" id=\"Model_1\" name=\"Model\"> <listOfUnitDefinitions> <unitDefinition id=\"volume\"> <listOfUnits> <unit kind=\"litre\" scale=\"-3\"/> </listOfUnits> </unitDefinition> <unitDefinition id=\"substance\"> <listOfUnits> <unit kind=\"mole\" scale=\"-3\"/> </listOfUnits> </unitDefinition> </listOfUnitDefinitions> <listOfCompartments> <compartment id=\"compartment_1\" name=\"compartment\" size=\"1\"/> </listOfCompartments> <listOfSpecies> <species id=\"species_1\" name=\"A\" compartment=\"compartment_1\" initialConcentration=\"5\"/> <species id=\"species_2\" name=\"B\" compartment=\"compartment_1\" initialConcentration=\"0\"/> <species id=\"species_3\" name=\"C\" compartment=\"compartment_1\" initialConcentration=\"0\"/> </listOfSpecies> <listOfReactions> <reaction id=\"reaction_1\" name=\"reaction\" reversible=\"false\"> <listOfReactants> <speciesReference species=\"species_1\"/> </listOfReactants> <listOfProducts> <speciesReference species=\"species_2\"/> </listOfProducts> <kineticLaw> <math xmlns=\"http://www.w3.org/1998/Math/MathML\"> <apply> <times/> <ci> compartment_1 </ci> <ci> k1 </ci> <ci> species_1 </ci> </apply> </math> <listOfParameters> <parameter id=\"k1\" name=\"k1\" value=\"0.03\"/> </listOfParameters> </kineticLaw> </reaction> <reaction id=\"reaction_2\" name=\"reaction_1\" reversible=\"false\"> <listOfReactants> <speciesReference species=\"species_2\"/> </listOfReactants> <listOfProducts> <speciesReference species=\"species_3\"/> </listOfProducts> <kineticLaw> <math xmlns=\"http://www.w3.org/1998/Math/MathML\"> <apply> <times/> <ci> compartment_1 </ci> <ci> k1 </ci> <ci> species_2 </ci> </apply> </math> <listOfParameters> <parameter id=\"k1\" name=\"k1\" value=\"0.004\"/> </listOfParameters> </kineticLaw> </reaction> </listOfReactions> </model> </sbml>""" def main(): assert CRootContainer.getRoot() != None # create a datamodel dataModel = CRootContainer.addDatamodel() assert CRootContainer.getDatamodelList().size() == 1 # first we load a simple model try: # load the model dataModel.importSBMLFromString(MODEL_STRING) except: sys.stderr.write("Error while importing the model.\n") return 1 # now we need to run some time course simulation to get data to fit # against # get the trajectory task object trajectoryTask = dataModel.getTask("Time-Course") assert trajectoryTask != None # run a deterministic time course 
trajectoryTask.setMethodType(CTaskEnum.Method_deterministic) # pass a pointer of the model to the problem trajectoryTask.getProblem().setModel(dataModel.getModel()) # activate the task so that it will be run when the model is saved # and passed to CopasiSE trajectoryTask.setScheduled(True) # get the problem for the task to set some parameters problem = trajectoryTask.getProblem() # simulate 4000 steps problem.setStepNumber(4000) # start at time 0 dataModel.getModel().setInitialTime(0.0) # simulate a duration of 400 time units problem.setDuration(400) # tell the problem to actually generate time series data problem.setTimeSeriesRequested(True) # set some parameters for the LSODA method through the method method = trajectoryTask.getMethod() result=True try: # now we run the actual trajectory result=trajectoryTask.processWithOutputFlags(True, CCopasiTask.ONLY_TIME_SERIES) except: sys.stderr.write("Error. Running the time course simulation failed.\n" ) sys.stderr.write(trajectoryTask.getProcessWarning()) sys.stderr.write(trajectoryTask.getProcessError()) # check if there are additional error messages if CCopasiMessage.size() > 0: # print the messages in chronological order sys.stderr.write(CCopasiMessage.getAllMessageText(True)) return 1 if result==False: sys.stderr.write("An error occured while running the time course simulation.\n" ) dataModel.saveModel('test.cps', True) sys.stderr.write(trajectoryTask.getProcessWarning()) sys.stderr.write(trajectoryTask.getProcessError()) # check if there are additional error messages if CCopasiMessage.size() > 0: # print the messages in chronological order sys.stderr.write(CCopasiMessage.getAllMessageText(True)) return 1 # we write the data to a file and add some noise to it # This is necessary since COPASI can only read experimental data from # file. 
timeSeries = trajectoryTask.getTimeSeries() # we simulated 100 steps, including the initial state, this should be # 101 step in the timeseries assert timeSeries.getRecordedSteps() == 4001 iMax = timeSeries.getNumVariables() # there should be four variables, the three metabolites and time assert iMax == 4 lastIndex = timeSeries.getRecordedSteps() - 1 # open the file # we need to remember in which order the variables are written to file # since we need to specify this later in the parameter fitting task indexSet=[] metabVector=[] # write the header # the first variable in a time series is a always time, for the rest # of the variables, we use the SBML id in the header rand=0.0 os=open("fakedata_example6.txt","w") os.write("# time ") keyFactory=CRootContainer.getKeyFactory() assert keyFactory != None for i in range(1,iMax): key=timeSeries.getKey(i) object=keyFactory.get(key) assert object != None # only write header data or metabolites if object.__class__==CMetab: os.write(", ") os.write(timeSeries.getSBMLId(i,dataModel)) indexSet.append(i) metabVector.append(object) os.write("\n") data=0.0 for i in range(0,lastIndex): s="" for j in range(0,iMax): # we only want to write the data for metabolites # the compartment does not interest us here if j==0 or (j in indexSet): # write the data with some noise (+-5% max) rand=random() data=timeSeries.getConcentrationData(i, j) # don't add noise to the time if j!=0: data+=data*(rand*0.1-0.05) s=s+str(data) s=s+", " # remove the last two characters again os.write(s[0:-2]) os.write("\n") os.close() # now we change the parameter values to see if the parameter fitting # can really find the original values rand=random()*10 reaction=dataModel.getModel().getReaction(0) # we know that it is an irreversible mass action, so there is one # parameter assert reaction.getParameters().size() == 1 assert reaction.isLocalParameter(0) # the parameter of a irreversible mass action is called k1 reaction.setParameterValue("k1",rand) reaction=dataModel.getModel().getReaction(1) # we know that it is an irreversible mass action, so there is one # parameter assert reaction.getParameters().size() == 1 assert reaction.isLocalParameter(0) reaction.setParameterValue("k1",rand) fitTask=dataModel.addTask(CTaskEnum.Task_parameterFitting) assert fitTask != None # the method in a fit task is an instance of COptMethod or a subclass of # it. 
fitMethod=fitTask.getMethod() assert fitMethod != None # the object must be an instance of COptMethod or a subclass thereof # (CFitMethod) fitProblem=fitTask.getProblem() assert fitProblem != None experimentSet=fitProblem.getParameter("Experiment Set") assert experimentSet != None # first experiment (we only have one here) experiment=CExperiment(dataModel) assert experiment != None # tell COPASI where to find the data # reading data from string is not possible with the current C++ API experiment.setFileName("fakedata_example6.txt") # we have to tell COPASI that the data for the experiment is a komma # separated list (the default is TAB separated) experiment.setSeparator(",") # the data start in row 1 and goes to row 4001 experiment.setFirstRow(1) assert experiment.getFirstRow()==1 experiment.setLastRow(4001) assert experiment.getLastRow()==4001 experiment.setHeaderRow(1) assert experiment.getHeaderRow()==1 experiment.setExperimentType(CTaskEnum.Task_timeCourse) assert experiment.getExperimentType()==CTaskEnum.Task_timeCourse experiment.setNumColumns(4) assert experiment.getNumColumns()==4 objectMap=experiment.getObjectMap() assert objectMap != None result=objectMap.setNumCols(4) assert result == True result=objectMap.setRole(0,CExperiment.time) assert result == True assert objectMap.getRole(0) == CExperiment.time model=dataModel.getModel() assert model!=None timeReference=model.getValueReference() assert timeReference != None objectMap.setObjectCN(0,timeReference.getCN().getString()) # now we tell COPASI which column contain the concentrations of # metabolites and belong to dependent variables objectMap.setRole(1,CExperiment.dependent) metab=metabVector[0] assert metab != None particleReference=metab.getConcentrationReference() assert particleReference != None objectMap.setObjectCN(1,particleReference.getCN().getString()) objectMap.setRole(2,CExperiment.dependent) metab=metabVector[1] assert metab != None particleReference=metab.getConcentrationReference() assert particleReference != None objectMap.setObjectCN(2,particleReference.getCN().getString()) objectMap.setRole(3,CExperiment.dependent) metab=metabVector[2] assert metab != None particleReference=metab.getConcentrationReference() assert particleReference != None objectMap.setObjectCN(3,particleReference.getCN().getString()) experimentSet.addExperiment(experiment) assert experimentSet.getExperimentCount()==1 # addExperiment makes a copy, so we need to get the added experiment # again experiment=experimentSet.getExperiment(0) assert experiment != None # now we have to define the two fit items for the two local parameters # of the two reactions reaction=model.getReaction(0) assert reaction != None assert reaction.isLocalParameter(0)==True parameter=reaction.getParameters().getParameter(0) assert parameter != None # define a CFitItem parameterReference=parameter.getValueReference() assert parameterReference != None fitItem1=CFitItem(dataModel) assert fitItem1 !=None fitItem1.setObjectCN(parameterReference.getCN()) fitItem1.setStartValue(4.0) fitItem1.setLowerBound(CCommonName("0.00001")) fitItem1.setUpperBound(CCommonName("10")) # add the fit item to the correct parameter group optimizationItemGroup=fitProblem.getParameter("OptimizationItemList") assert optimizationItemGroup != None optimizationItemGroup.addParameter(fitItem1) reaction=model.getReaction(1) assert reaction != None assert reaction.isLocalParameter(0)==True parameter=reaction.getParameters().getParameter(0) assert parameter != None # define a CFitItem 
parameterReference=parameter.getValueReference() assert parameterReference != None fitItem2=CFitItem(dataModel) assert fitItem2 !=None fitItem2.setObjectCN(parameterReference.getCN()) fitItem2.setStartValue(4.0) fitItem2.setLowerBound(CCommonName("0.00001")) fitItem2.setUpperBound(CCommonName("10")) # add the fit item to the correct parameter group optimizationItemGroup.addParameter(fitItem2) result=True try: # running the task for this example will probably take some time print ("This can take some time...") result=fitTask.processWithOutputFlags(True, CCopasiTask.ONLY_TIME_SERIES) except: sys.stderr.write("Error. Parameter fitting failed.\n") return 1 if result==False: sys.stderr.write("An error occured while running the Parameter estimation.\n") dataModel.saveModel('test.cps', True) sys.stderr.write(fitTask.getProcessWarning()) sys.stderr.write(fitTask.getProcessError()) # check if there are additional error messages if CCopasiMessage.size() > 0: # print the messages in chronological order sys.stderr.write(CCopasiMessage.getAllMessageText(True)) return 1 assert result == True # assert that there are two optimization items assert len(fitProblem.getOptItemList()) == 2 # the order should be the order in whih we added the items above optItem1 = fitProblem.getOptItemList()[0] optItem2 = fitProblem.getOptItemList()[1] # the actual results are stored in the fit problem assert fitProblem.getSolutionVariables().size() == 2 print ("value for " , optItem1.getObject().getCN().getString() , ": " , fitProblem.getSolutionVariables().get(0)) print ("value for " , optItem2.getObject().getCN().getString() , ": " , fitProblem.getSolutionVariables().get(1)) # depending on the noise, the fit can be quite bad, so we are a litle # relaxed here (we should be within 3% of the original values) assert (abs(fitProblem.getSolutionVariables().get(0) - 0.03) / 0.03) < 3e-2 assert (abs(fitProblem.getSolutionVariables().get(1) - 0.004) / 0.004) < 3e-2 if(__name__ == '__main__'): main()
jonasfoe/COPASI
copasi/bindings/python/examples/example6.py
Python
artistic-2.0
14,941
[ "COPASI" ]
e39e260f700ec194667847f1691b09fd4169573063f27e66f7b1e79366251120
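The COPASI example above perturbs each simulated concentration by up to ±5% before writing the fake data file, then accepts the fit if the recovered rate constants sit within 3% of the originals. The snippet below restates just that noise and acceptance logic in plain NumPy (it does not use the COPASI bindings; values are illustrative).

# Standalone restatement of the noise model and acceptance test from the example.
import numpy as np

rng = np.random.default_rng(42)

def add_noise(values, max_rel=0.05):
    """Multiplicative uniform noise in the range +/- max_rel, as in the example."""
    return values * (1.0 + rng.uniform(-max_rel, max_rel, size=values.shape))

def within_tolerance(fitted, true, rel_tol=3e-2):
    """The example's acceptance test: relative error below 3%."""
    return abs(fitted - true) / true < rel_tol

conc = np.array([5.0, 3.2, 1.7])
print(add_noise(conc))
print(within_tolerance(0.0304, 0.03), within_tolerance(0.05, 0.03))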
__all__ = ['threshold_adaptive', 'threshold_otsu', 'threshold_yen', 'threshold_isodata', 'threshold_li', ] import numpy as np from scipy import ndimage as ndi from ..exposure import histogram from .._shared.utils import assert_nD def threshold_adaptive(image, block_size, method='gaussian', offset=0, mode='reflect', param=None): """Applies an adaptive threshold to an array. Also known as local or dynamic thresholding where the threshold value is the weighted mean for the local neighborhood of a pixel subtracted by a constant. Alternatively the threshold can be determined dynamically by a a given function using the 'generic' method. Parameters ---------- image : (N, M) ndarray Input image. block_size : int Uneven size of pixel neighborhood which is used to calculate the threshold value (e.g. 3, 5, 7, ..., 21, ...). method : {'generic', 'gaussian', 'mean', 'median'}, optional Method used to determine adaptive threshold for local neighbourhood in weighted mean image. * 'generic': use custom function (see `param` parameter) * 'gaussian': apply gaussian filter (see `param` parameter for custom\ sigma value) * 'mean': apply arithmetic mean filter * 'median': apply median rank filter By default the 'gaussian' method is used. offset : float, optional Constant subtracted from weighted mean of neighborhood to calculate the local threshold value. Default offset is 0. mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional The mode parameter determines how the array borders are handled, where cval is the value when mode is equal to 'constant'. Default is 'reflect'. param : {int, function}, optional Either specify sigma for 'gaussian' method or function object for 'generic' method. This functions takes the flat array of local neighbourhood as a single argument and returns the calculated threshold for the centre pixel. Returns ------- threshold : (N, M) ndarray Thresholded binary image References ---------- .. [1] http://docs.opencv.org/modules/imgproc/doc/miscellaneous_transformations.html?highlight=threshold#adaptivethreshold Examples -------- >>> from skimage.data import camera >>> image = camera()[:50, :50] >>> binary_image1 = threshold_adaptive(image, 15, 'mean') >>> func = lambda arr: arr.mean() >>> binary_image2 = threshold_adaptive(image, 15, 'generic', param=func) """ assert_nD(image, 2) thresh_image = np.zeros(image.shape, 'double') if method == 'generic': ndi.generic_filter(image, param, block_size, output=thresh_image, mode=mode) elif method == 'gaussian': if param is None: # automatically determine sigma which covers > 99% of distribution sigma = (block_size - 1) / 6.0 else: sigma = param ndi.gaussian_filter(image, sigma, output=thresh_image, mode=mode) elif method == 'mean': mask = 1. / block_size * np.ones((block_size,)) # separation of filters to speedup convolution ndi.convolve1d(image, mask, axis=0, output=thresh_image, mode=mode) ndi.convolve1d(thresh_image, mask, axis=1, output=thresh_image, mode=mode) elif method == 'median': ndi.median_filter(image, block_size, output=thresh_image, mode=mode) return image > (thresh_image - offset) def threshold_otsu(image, nbins=256): """Return threshold value based on Otsu's method. Parameters ---------- image : array Input image. nbins : int, optional Number of bins used to calculate histogram. This value is ignored for integer arrays. Returns ------- threshold : float Upper threshold value. All pixels intensities that less or equal of this value assumed as foreground. References ---------- .. 
[1] Wikipedia, http://en.wikipedia.org/wiki/Otsu's_Method Examples -------- >>> from skimage.data import camera >>> image = camera() >>> thresh = threshold_otsu(image) >>> binary = image <= thresh """ hist, bin_centers = histogram(image.ravel(), nbins) hist = hist.astype(float) # class probabilities for all possible thresholds weight1 = np.cumsum(hist) weight2 = np.cumsum(hist[::-1])[::-1] # class means for all possible thresholds mean1 = np.cumsum(hist * bin_centers) / weight1 mean2 = (np.cumsum((hist * bin_centers)[::-1]) / weight2[::-1])[::-1] # Clip ends to align class 1 and class 2 variables: # The last value of `weight1`/`mean1` should pair with zero values in # `weight2`/`mean2`, which do not exist. variance12 = weight1[:-1] * weight2[1:] * (mean1[:-1] - mean2[1:]) ** 2 idx = np.argmax(variance12) threshold = bin_centers[:-1][idx] return threshold def threshold_yen(image, nbins=256): """Return threshold value based on Yen's method. Parameters ---------- image : array Input image. nbins : int, optional Number of bins used to calculate histogram. This value is ignored for integer arrays. Returns ------- threshold : float Upper threshold value. All pixels intensities that less or equal of this value assumed as foreground. References ---------- .. [1] Yen J.C., Chang F.J., and Chang S. (1995) "A New Criterion for Automatic Multilevel Thresholding" IEEE Trans. on Image Processing, 4(3): 370-378 .. [2] Sezgin M. and Sankur B. (2004) "Survey over Image Thresholding Techniques and Quantitative Performance Evaluation" Journal of Electronic Imaging, 13(1): 146-165, http://www.busim.ee.boun.edu.tr/~sankur/SankurFolder/Threshold_survey.pdf .. [3] ImageJ AutoThresholder code, http://fiji.sc/wiki/index.php/Auto_Threshold Examples -------- >>> from skimage.data import camera >>> image = camera() >>> thresh = threshold_yen(image) >>> binary = image <= thresh """ hist, bin_centers = histogram(image.ravel(), nbins) # On blank images (e.g. filled with 0) with int dtype, `histogram()` # returns `bin_centers` containing only one value. Speed up with it. if bin_centers.size == 1: return bin_centers[0] # Calculate probability mass function pmf = hist.astype(np.float32) / hist.sum() P1 = np.cumsum(pmf) # Cumulative normalized histogram P1_sq = np.cumsum(pmf ** 2) # Get cumsum calculated from end of squared array: P2_sq = np.cumsum(pmf[::-1] ** 2)[::-1] # P2_sq indexes is shifted +1. I assume, with P1[:-1] it's help avoid '-inf' # in crit. ImageJ Yen implementation replaces those values by zero. crit = np.log(((P1_sq[:-1] * P2_sq[1:]) ** -1) * (P1[:-1] * (1.0 - P1[:-1])) ** 2) return bin_centers[crit.argmax()] def threshold_isodata(image, nbins=256, return_all=False): """Return threshold value(s) based on ISODATA method. Histogram-based threshold, known as Ridler-Calvard method or inter-means. Threshold values returned satisfy the following equality: `threshold = (image[image <= threshold].mean() +` `image[image > threshold].mean()) / 2.0` That is, returned thresholds are intensities that separate the image into two groups of pixels, where the threshold intensity is midway between the mean intensities of these groups. For integer images, the above equality holds to within one; for floating- point images, the equality holds to within the histogram bin-width. Parameters ---------- image : array Input image. nbins : int, optional Number of bins used to calculate histogram. This value is ignored for integer arrays. 
return_all: bool, optional If False (default), return only the lowest threshold that satisfies the above equality. If True, return all valid thresholds. Returns ------- threshold : float or int or array Threshold value(s). References ---------- .. [1] Ridler, TW & Calvard, S (1978), "Picture thresholding using an iterative selection method" .. [2] IEEE Transactions on Systems, Man and Cybernetics 8: 630-632, http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=4310039 .. [3] Sezgin M. and Sankur B. (2004) "Survey over Image Thresholding Techniques and Quantitative Performance Evaluation" Journal of Electronic Imaging, 13(1): 146-165, http://www.busim.ee.boun.edu.tr/~sankur/SankurFolder/Threshold_survey.pdf .. [4] ImageJ AutoThresholder code, http://fiji.sc/wiki/index.php/Auto_Threshold Examples -------- >>> from skimage.data import coins >>> image = coins() >>> thresh = threshold_isodata(image) >>> binary = image > thresh """ hist, bin_centers = histogram(image.ravel(), nbins) # image only contains one unique value if len(bin_centers) == 1: if return_all: return bin_centers else: return bin_centers[0] hist = hist.astype(np.float32) # csuml and csumh contain the count of pixels in that bin or lower, and # in all bins strictly higher than that bin, respectively csuml = np.cumsum(hist) csumh = np.cumsum(hist[::-1])[::-1] - hist # intensity_sum contains the total pixel intensity from each bin intensity_sum = hist * bin_centers # l and h contain average value of all pixels in that bin or lower, and # in all bins strictly higher than that bin, respectively. # Note that since exp.histogram does not include empty bins at the low or # high end of the range, csuml and csumh are strictly > 0, except in the # last bin of csumh, which is zero by construction. # So no worries about division by zero in the following lines, except # for the last bin, but we can ignore that because no valid threshold # can be in the top bin. So we just patch up csumh[-1] to not cause 0/0 # errors. csumh[-1] = 1 l = np.cumsum(intensity_sum) / csuml h = (np.cumsum(intensity_sum[::-1])[::-1] - intensity_sum) / csumh # isodata finds threshold values that meet the criterion t = (l + m)/2 # where l is the mean of all pixels <= t and h is the mean of all pixels # > t, as calculated above. So we are looking for places where # (l + m) / 2 equals the intensity value for which those l and m figures # were calculated -- which is, of course, the histogram bin centers. # We only require this equality to be within the precision of the bin # width, of course. all_mean = (l + h) / 2.0 bin_width = bin_centers[1] - bin_centers[0] # Look only at thresholds that are below the actual all_mean value, # for consistency with the threshold being included in the lower pixel # group. Otherwise can get thresholds that are not actually fixed-points # of the isodata algorithm. For float images, this matters less, since # there really can't be any guarantees anymore anyway. distances = all_mean - bin_centers thresholds = bin_centers[(distances >= 0) & (distances < bin_width)] if return_all: return thresholds else: return thresholds[0] def threshold_li(image): """Return threshold value based on adaptation of Li's Minimum Cross Entropy method. Parameters ---------- image : array Input image. Returns ------- threshold : float Upper threshold value. All pixels intensities more than this value are assumed to be foreground. References ---------- .. [1] Li C.H. and Lee C.K. (1993) "Minimum Cross Entropy Thresholding" Pattern Recognition, 26(4): 617-625 .. 
[2] Li C.H. and Tam P.K.S. (1998) "An Iterative Algorithm for Minimum Cross Entropy Thresholding" Pattern Recognition Letters, 18(8): 771-776 .. [3] Sezgin M. and Sankur B. (2004) "Survey over Image Thresholding Techniques and Quantitative Performance Evaluation" Journal of Electronic Imaging, 13(1): 146-165 http://citeseer.ist.psu.edu/sezgin04survey.html .. [4] ImageJ AutoThresholder code, http://fiji.sc/wiki/index.php/Auto_Threshold Examples -------- >>> from skimage.data import camera >>> image = camera() >>> thresh = threshold_li(image) >>> binary = image > thresh """ # Copy to ensure input image is not modified image = image.copy() # Requires positive image (because of log(mean)) immin = np.min(image) image -= immin imrange = np.max(image) tolerance = 0.5 * imrange / 256 # Calculate the mean gray-level mean = np.mean(image) # Initial estimate new_thresh = mean old_thresh = new_thresh + 2 * tolerance # Stop the iterations when the difference between the # new and old threshold values is less than the tolerance while abs(new_thresh - old_thresh) > tolerance: old_thresh = new_thresh threshold = old_thresh + tolerance # range # Calculate the means of background and object pixels mean_back = image[image <= threshold].mean() mean_obj = image[image > threshold].mean() temp = (mean_back - mean_obj) / (np.log(mean_back) - np.log(mean_obj)) if temp < 0: new_thresh = temp - tolerance else: new_thresh = temp + tolerance return threshold + immin
jwiggins/scikit-image
skimage/filters/thresholding.py
Python
bsd-3-clause
13,792
[ "Gaussian" ]
cbb61b108de7155ac9af9f1f9a170dea8d726ed37693c475401322d71c552af9
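threshold_otsu above maximizes the between-class variance weight1 * weight2 * (mean1 - mean2)^2 over all candidate thresholds. The sketch below re-derives that criterion with plain NumPy on a small bimodal sample to make the selection rule concrete; for real images, calling skimage.filters.threshold_otsu directly is the normal route.

# Compact NumPy sketch of the Otsu criterion used by threshold_otsu above.
import numpy as np

def otsu_threshold(values, nbins=256):
    hist, bin_edges = np.histogram(values, bins=nbins)
    centers = 0.5 * (bin_edges[:-1] + bin_edges[1:])
    hist = hist.astype(float)
    w1 = np.cumsum(hist)
    w2 = np.cumsum(hist[::-1])[::-1]
    m1 = np.cumsum(hist * centers) / np.maximum(w1, 1e-12)            # class-1 means
    m2 = (np.cumsum((hist * centers)[::-1]) / np.maximum(w2[::-1], 1e-12))[::-1]
    var12 = w1[:-1] * w2[1:] * (m1[:-1] - m2[1:]) ** 2                # between-class variance
    return centers[:-1][np.argmax(var12)]

rng = np.random.default_rng(1)
pixels = np.concatenate([rng.normal(60, 10, 4000), rng.normal(180, 12, 1000)])
print(otsu_threshold(pixels))   # lands between the two modes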
############################### # # (c) Vlad Zat 2017 # Student No: C14714071 # Course: DT228 # Date: 04-10-2017 # # Title: Testing Feature Detection Algorithms import numpy as np import cv2 import easygui # help(cv2.drawKeypoints) # help(cv2.drawMatches) imagesPath = 'images/' outputPath = 'output/' fileExtension = '.jpg' I1 = cv2.imread(imagesPath + 'pcb1.jpg') I2 = cv2.imread(imagesPath + 'pcb2.jpg') G1 = cv2.cvtColor(I1, cv2.COLOR_BGR2GRAY) G2 = cv2.cvtColor(I2, cv2.COLOR_BGR2GRAY) def displayFAST(window, image, nms=1): # FAST - Features from Accelerated Segment Test # Reference - http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_feature2d/py_fast/py_fast.html fast = cv2.FastFeatureDetector_create() # Set maximum suppression - nms ignores unimportant pixels in corners that are not the the local maxima # Reference - http://users.ecs.soton.ac.uk/msn/book/new_demo/nonmax/ fast.setNonmaxSuppression(nms) # find corners keyPoints = fast.detect(image, None) corners = image.copy() corners = cv2.drawKeypoints(image = image, keypoints = keyPoints, outImage = corners, color = (0, 0, 255)) cv2.imwrite(outputPath + window + fileExtension, corners) cv2.imshow(window, corners) def displayORB(window, image): # ORB - Oriented FAST and Rotated BRIEF # Reference - http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_feature2d/py_orb/py_orb.html#orb orb = cv2.ORB_create() keyPoints = orb.detect(image, None) keyPoints, descriptor = orb.compute(image, keyPoints) newImage = image.copy() newImage = cv2.drawKeypoints(image = image, keypoints = keyPoints, outImage = newImage, color = (0, 0, 255), flags = 0) cv2.imwrite(outputPath + window + fileExtension, newImage) cv2.imshow(window, newImage) def displayAKAZE(window, image): # AKAZE # Reference - http://docs.opencv.org/3.0-beta/doc/tutorials/features2d/akaze_matching/akaze_matching.html akaze = cv2.AKAZE_create() keyPoints = akaze.detect(image, None) newImage = image.copy() newImage = cv2.drawKeypoints(image = image, keypoints = keyPoints, outImage = newImage, color = (0, 0, 255), flags = 0) cv2.imwrite(outputPath + window + fileExtension, newImage) cv2.imshow(window, newImage) def showDifs(window, image1, keyPoints1, desc1, image2, keyPoints2, desc2): # Feature Matching # Reference - http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_feature2d/py_matcher/py_matcher.html # BFMatcher bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck = True) matches = bf.match(desc1, desc2) matches = sorted(matches, key = lambda x:x.distance) matchesImage = image1.copy() matchesImage = cv2.drawMatches(img1 = image1, keypoints1 = keyPoints1, img2 = image2, keypoints2 = keyPoints2, matches1to2 = matches[:250], outImg = matchesImage, flags=2) cv2.imwrite(outputPath + window + fileExtension, matchesImage) cv2.imshow(window, matchesImage) # displayFAST('fast1', I1) # displayFAST('fast2', I2) # # displayORB('orb1', I1) # displayORB('orb2', I2) # # displayAKAZE('akaze1', I1) # displayAKAZE('akaze2', I2) # Get edges from images with Canny Edge Detection - TODO: test with multiple feature detection algorithms # Reference http://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_imgproc/py_canny/py_canny.html # edges1 = cv2.Canny(I1, 150, 200) # edges2 = cv2.Canny(I2, 150, 200) # cv2.imshow('canny1', edges1) # cv2.imshow('canny2', edges2) # Noise Reduction using 5x5 Gaussian filter ? 
# ORB # orb = cv2.ORB_create() # keyPoints1, desc1 = orb.detectAndCompute(I1, None) # keyPoints2, desc2 = orb.detectAndCompute(I2, None) # AKAZE akaze = cv2.AKAZE_create() keyPoints1, desc1 = akaze.detectAndCompute(I1, None) keyPoints2, desc2 = akaze.detectAndCompute(I2, None) showDifs('akazeDif', I1, keyPoints1, desc1, I2, keyPoints2, desc2) key = cv2.waitKey(0)
vzat/comparing_images
tests/featureDetectionTest.py
Python
mit
3,914
[ "Gaussian" ]
007d54b489a2a40c0f9e80d04e6827543d2b32be91d70b708de2e9ed30b2fac7
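The feature-detection script above matches AKAZE descriptors with a brute-force Hamming matcher and simply draws the best 250 matches. A common follow-on step is Lowe's ratio test plus a RANSAC homography to relate the two boards before diffing them; the sketch below assumes the keyPoints1/desc1 and keyPoints2/desc2 variables from the AKAZE step are in scope, and the 0.75 ratio and 5.0 px reprojection threshold are illustrative choices, not values from the script.

# Hedged follow-on sketch: ratio test + RANSAC homography on the AKAZE matches.
import numpy as np
import cv2

bf = cv2.BFMatcher(cv2.NORM_HAMMING)          # no crossCheck, so knnMatch is valid
pairs = bf.knnMatch(desc1, desc2, k=2)
good = [p[0] for p in pairs if len(p) == 2 and p[0].distance < 0.75 * p[1].distance]

if len(good) >= 4:
    src = np.float32([keyPoints1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
    dst = np.float32([keyPoints2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
    H, inliers = cv2.findHomography(src, dst, cv2.RANSAC, 5.0)
    print(len(good), "ratio-test matches,", int(inliers.sum()), "RANSAC inliers")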
"""Sensor that can display the current Home Assistant versions.""" import logging from datetime import timedelta import voluptuous as vol import homeassistant.helpers.config_validation as cv from homeassistant.helpers.aiohttp_client import async_get_clientsession from homeassistant.components.sensor import PLATFORM_SCHEMA from homeassistant.const import CONF_NAME, CONF_SOURCE from homeassistant.helpers.entity import Entity from homeassistant.util import Throttle _LOGGER = logging.getLogger(__name__) ALL_IMAGES = [ 'default', 'intel-nuc', 'qemux86', 'qemux86-64', 'qemuarm', 'qemuarm-64', 'raspberrypi', 'raspberrypi2', 'raspberrypi3', 'raspberrypi3-64', 'tinker', 'odroid-c2', 'odroid-xu' ] ALL_SOURCES = [ 'local', 'pypi', 'hassio', 'docker' ] CONF_BETA = 'beta' CONF_IMAGE = 'image' DEFAULT_IMAGE = 'default' DEFAULT_NAME_LATEST = "Latest Version" DEFAULT_NAME_LOCAL = "Current Version" DEFAULT_SOURCE = 'local' ICON = 'mdi:package-up' TIME_BETWEEN_UPDATES = timedelta(minutes=5) PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Optional(CONF_BETA, default=False): cv.boolean, vol.Optional(CONF_IMAGE, default=DEFAULT_IMAGE): vol.In(ALL_IMAGES), vol.Optional(CONF_NAME, default=''): cv.string, vol.Optional(CONF_SOURCE, default=DEFAULT_SOURCE): vol.In(ALL_SOURCES), }) async def async_setup_platform( hass, config, async_add_entities, discovery_info=None): """Set up the Version sensor platform.""" from pyhaversion import Version beta = config.get(CONF_BETA) image = config.get(CONF_IMAGE) name = config.get(CONF_NAME) source = config.get(CONF_SOURCE) session = async_get_clientsession(hass) if beta: branch = 'beta' else: branch = 'stable' haversion = VersionData(Version(hass.loop, session, branch, image), source) async_add_entities([VersionSensor(haversion, name)], True) class VersionSensor(Entity): """Representation of a Home Assistant version sensor.""" def __init__(self, haversion, name=''): """Initialize the Version sensor.""" self.haversion = haversion self._name = name self._state = None async def async_update(self): """Get the latest version information.""" await self.haversion.async_update() @property def name(self): """Return the name of the sensor.""" if self._name: return self._name if self.haversion.source == DEFAULT_SOURCE: return DEFAULT_NAME_LOCAL return DEFAULT_NAME_LATEST @property def state(self): """Return the state of the sensor.""" return self.haversion.api.version @property def device_state_attributes(self): """Return attributes for the sensor.""" return self.haversion.api.version_data @property def icon(self): """Return the icon to use in the frontend, if any.""" return ICON class VersionData: """Get the latest data and update the states.""" def __init__(self, api, source): """Initialize the data object.""" self.api = api self.source = source @Throttle(TIME_BETWEEN_UPDATES) async def async_update(self): """Get the latest version information.""" if self.source == 'pypi': await self.api.get_pypi_version() elif self.source == 'hassio': await self.api.get_hassio_version() elif self.source == 'docker': await self.api.get_docker_version() else: await self.api.get_local_version()
molobrakos/home-assistant
homeassistant/components/version/sensor.py
Python
apache-2.0
3,583
[ "TINKER" ]
a82e6ad339fa2054f55469b680f4dbf5fd2562410ac87fe2af5cc9401c3b84d1
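The version sensor above rate-limits async_update through homeassistant.util.Throttle with a five-minute window, so repeated state refreshes inside that window never hit the backing API. The sketch below is a standalone illustration of that throttling pattern in plain Python; it is not the Home Assistant helper itself.

# Standalone sketch of the throttle pattern (plain Python, synchronous for brevity).
from datetime import datetime, timedelta
from functools import wraps

def throttle(interval: timedelta):
    def decorator(func):
        last_call = None
        @wraps(func)
        def wrapper(*args, **kwargs):
            nonlocal last_call
            now = datetime.utcnow()
            if last_call is not None and now - last_call < interval:
                return None        # skipped: still inside the throttle window
            last_call = now
            return func(*args, **kwargs)
        return wrapper
    return decorator

@throttle(timedelta(minutes=5))
def fetch_version():
    return "fetched"

print(fetch_version())  # "fetched"
print(fetch_version())  # None (throttled)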