Dataset columns (name / dtype / range):

  text        stringlengths   12 .. 1.05M
  repo_name   stringlengths   5 .. 86
  path        stringlengths   4 .. 191
  language    stringclasses   1 value
  license     stringclasses   15 values
  size        int32           12 .. 1.05M
  keyword     listlengths     1 .. 23
  text_hash   stringlengths   64 .. 64
"""Function that for given neural network, returns lists of numbers of edges, weights and neurons per layer. """ from athenet.models import lenet, alexnet from athenet.layers import FullyConnectedLayer, ConvolutionalLayer, \ ActivationLayer, Dropout, Softmax, MaxPool, LRN import numpy as np def _conv_edges(x, y, fx, fy, sx, sy, n_in, n_out, g=1): """Number of edges in convolution.""" nx = (x - fx) / sx + 1 ny = (y - fy) / sy + 1 return (fx * fy * nx * ny * n_in * n_out) / g def _max_pool_edges(x, y, fx, fy, sx, sy, n_channels): """Number of edges in max pool.""" return _conv_edges(x, y, fx, fy, sx, sy, n_channels, 1) def _lrn_edges(x, y, n_channel, lr): """Number of edges in lrn.""" return x * y * (n_channel * lr - (lr ** 2 - 1) / 4) def count_statistics(network): """For given neural network, returns lists of numbers of edges, weights and neurons per layer. :param network: neural network in which edges, weights and layers will be counted. For n layers, list of edges and weights are of length n and list of neurons is of length (n + 1). List of edges tells how many connections are between inputs and outputs of layers. List of weights tells how many elements are in weight matrices in layers. List of neurons tells how many elements are in inputs/outputs of the layers. Note: Number of edges in softmax for input size N is N in this calculation. It comes from the fact that softmax need O(N) operations to calculate output from input.""" layers = network.layers n_edges, n_weights, n_neurons = [], [], [] n_neurons += [np.prod(layers[0].input_shape)] for layer in layers: n_neurons += [np.prod(layer.output_shape)] if isinstance(layer, FullyConnectedLayer): n_weights += [np.prod(layer.W.shape)] elif isinstance(layer, ConvolutionalLayer): n_weights += [np.prod(layer.W.shape)] else: n_weights += [0] if isinstance(layer, ConvolutionalLayer): x, y, n_in = layer.image_shape fx, fy, n_out = layer.filter_shape sx, sy = layer.stride g = layer.n_groups n_edges += [_conv_edges(x, y, fx, fy, sx, sy, n_in, n_out, g)] continue if isinstance(layer, FullyConnectedLayer): n_edges += [layer.n_in * layer.n_out] continue if isinstance(layer, (ActivationLayer, Dropout, Softmax)): n_edges += [np.prod(layer.input_shape)] continue if isinstance(layer, MaxPool): fx, fy = layer.poolsize sx, sy = layer.stride x, y, n_channels = layer.input_shape n_edges += [_max_pool_edges(x, y, fx, fy, sx, sy, n_channels)] continue if isinstance(layer, LRN): x, y, n_channel = layer.input_shape lr = layer.local_range n_edges += [_lrn_edges(x, y, n_channel, lr)] continue raise ValueError("illegal layer type for counting elts") return (n_edges, n_weights, n_neurons) if __name__ == "__main__": """Count number of edges, weights and neurons in LeNet and AlexNet""" print """LeNet""" lenet_network = lenet(trained=False) edges, weights, neurons = count_statistics(lenet_network) for l, edge, weight, neuron in zip(lenet_network.layers, edges, weights, neurons): print type(l), l.output_shape print 'edges:', edge, 'weights:', weight, 'neurons:', neuron print 'sum of edges:', sum(edges) print 'sum of weigths:', sum(weights) print 'sum of neurons:', sum(neurons) print """AlexNet""" alexnet_network = alexnet(trained=False) edges, weights, neurons = count_statistics(alexnet_network) for l, edge, weight, neuron in zip(alexnet_network.layers, edges, weights, neurons): print type(l), l.output_shape print 'edges:', edge, 'weights:', weight, 'neurons:', neuron print 'sum of edges:', sum(edges) print 'sum of weigths:', sum(weights) print 'sum of neurons:', 
sum(neurons)
repo_name: heurezjusz/Athenet
path: athenet/utils/count_n_elts.py
language: Python
license: bsd-2-clause
size: 4,211
keyword: [ "NEURON" ]
text_hash: 86252e50ebffa064d71ef78a28731061838324962d5a799e8a45c46bdd8354e9
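The snippet above derives the number of connections in a convolutional layer from its geometry: nx = (x - fx) / sx + 1 output columns, ny likewise, and fx * fy taps per (input channel, output channel) pair. Below is a minimal stand-alone sketch of that count applied to an assumed LeNet-style first layer (28x28 input, 5x5 filter, stride 1, 1 input channel, 20 filters); the layer sizes are illustrative and not taken from the dataset row.

# Minimal sketch (not part of the dataset row) of athenet's edge count,
# written for Python 3 with explicit integer division.
def conv_edges(x, y, fx, fy, sx, sy, n_in, n_out, groups=1):
    """Connections between input and output of a convolutional layer."""
    nx = (x - fx) // sx + 1   # output width
    ny = (y - fy) // sy + 1   # output height
    # every output pixel of every output channel sees fx*fy pixels of
    # every input channel (split across groups, if any)
    return (fx * fy * nx * ny * n_in * n_out) // groups

if __name__ == "__main__":
    # 24*24 output positions * 25 filter taps * 20 filters = 288,000 edges
    print(conv_edges(28, 28, 5, 5, 1, 1, 1, 20))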
#!/usr/bin/env python
# coding=utf-8
"""556. Squarefree Gaussian Integers
https://projecteuler.net/problem=556

A **Gaussian integer** is a number z = a + bi where a, b are integers and
i^2 = -1. Gaussian integers are a subset of the complex numbers, and the
integers are the subset of Gaussian integers for which b = 0.

A Gaussian integer **unit** is one for which a^2 + b^2 = 1, i.e. one of
1, i, -1, -i.

Let's define a _proper_ Gaussian integer as one for which a > 0 and b ≥ 0.

A Gaussian integer z1 = a1 + b1 i is said to be divisible by z2 = a2 + b2 i
if z3 = a3 + b3 i = z1/z2 is a Gaussian integer.

$\frac {z_1} {z_2} = \frac {a_1 + b_1 i} {a_2 + b_2 i}
= \frac {(a_1 + b_1 i)(a_2 - b_2 i)} {(a_2 + b_2 i)(a_2 - b_2 i)}
= \frac {a_1 a_2 + b_1 b_2} {a_2^2 + b_2^2} + \frac {a_2 b_1 - a_1 b_2} {a_2^2 + b_2^2}i
= a_3 + b_3 i$

So, z1 is divisible by z2 if $\frac {a_1 a_2 + b_1 b_2} {a_2^2 + b_2^2}$ and
$\frac {a_2 b_1 - a_1 b_2} {a_2^2 + b_2^2}$ are integers.

For example, 2 is divisible by 1 + i because 2/(1 + i) = 1 - i is a Gaussian
integer.

A **Gaussian prime** is a Gaussian integer that is divisible only by a unit,
itself or itself times a unit. For example, 1 + 2i is a Gaussian prime,
because it is only divisible by 1, i, -1, -i, 1 + 2i, i(1 + 2i) = i - 2,
-(1 + 2i) = -1 - 2i and -i(1 + 2i) = 2 - i. 2 is not a Gaussian prime as it
is divisible by 1 + i.

A Gaussian integer can be uniquely factored as the product of a unit and
proper Gaussian primes. For example 2 = -i(1 + i)^2 and
1 + 3i = (1 + i)(2 + i).

A Gaussian integer is said to be squarefree if its prime factorization does
not contain repeated proper Gaussian primes. So 2 is not squarefree over the
Gaussian integers, but 1 + 3i is. Units and Gaussian primes are squarefree by
definition.

Let f(n) be the count of proper squarefree Gaussian integers with
a^2 + b^2 ≤ n.

For example f(10) = 7 because 1, 1 + i, 1 + 2i, 1 + 3i = (1 + i)(2 + i),
2 + i, 3 and 3 + i = -i(1 + i)(1 + 2i) are squarefree, while 2 = -i(1 + i)^2
and 2 + 2i = -i(1 + i)^3 are not.

You are given f(10^2) = 54, f(10^4) = 5218 and f(10^8) = 52126906.

Find f(10^14).
"""
repo_name: openqt/algorithms
path: projecteuler/pe556-squarefree-gaussian-integers.py
language: Python
license: gpl-3.0
size: 2,153
keyword: [ "Gaussian" ]
text_hash: 9de52894d03a452c2eeaed97f56e68b2792117c97f81ae8c816fbfa217a105cf
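For intuition about the definitions in the problem statement above, here is a hedged brute-force sketch of f(n) for very small n only. It uses the integrality condition quoted in the statement to test divisibility, and the fact that z is squarefree exactly when no non-unit w satisfies w^2 | z. It reproduces f(10) = 7 but is nowhere near fast enough for f(10^14).

# Illustrative brute force for tiny n; not the intended solution method.
def divides(w, z):
    """True if the Gaussian integer z is divisible by w (complex with integer parts)."""
    a1, b1 = int(z.real), int(z.imag)
    a2, b2 = int(w.real), int(w.imag)
    n = a2 * a2 + b2 * b2
    return (a1 * a2 + b1 * b2) % n == 0 and (a2 * b1 - a1 * b2) % n == 0

def squarefree(z):
    norm = int(z.real) ** 2 + int(z.imag) ** 2
    for a in range(-norm, norm + 1):
        for b in range(-norm, norm + 1):
            w = complex(a, b)
            nw = a * a + b * b
            # w**2 can only divide z when N(w)**2 <= N(z); skip units (N(w) < 2)
            if 2 <= nw and nw * nw <= norm and divides(w * w, z):
                return False
    return True

def f(n):
    count = 0
    for a in range(1, int(n ** 0.5) + 2):
        for b in range(0, int(n ** 0.5) + 2):
            if 0 < a * a + b * b <= n and squarefree(complex(a, b)):
                count += 1
    return count

print(f(10))  # expected: 7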
# $HeadURL$ """ The SitesDIRACGOCDBmapping module performs the necessary CS gymnastics to resolve sites DIRAC-GOCDB names. Assumes CS structure of: /Resources/Sites/<GRIDNAME>/<SITENAME> """ __RCSID__ = "$Id$" from DIRAC import gConfig, S_OK, S_ERROR from DIRAC.ConfigurationSystem.Client.Helpers.Path import cfgPath ############################################################################# def getGOCSiteName( diracSiteName ): """ Get GOC DB site name, given the DIRAC site name, as it stored in the CS :params: :attr:`diracSiteName` - string: DIRAC site name (e.g. 'LCG.CERN.ch') """ gocDBName = gConfig.getValue( '/Resources/Sites/%s/%s/Name' % ( diracSiteName.split( '.' )[0], diracSiteName ) ) if not gocDBName: return S_ERROR( "No GOC site name for %s in CS (Not a grid site ?)" % diracSiteName ) else: return S_OK( gocDBName ) ############################################################################# def getDIRACSiteName( gocSiteName ): """ Get DIRAC site name, given the GOC DB site name, as it stored in the CS :params: :attr:`gocSiteName` - string: GOC DB site name (e.g. 'CERN-PROD') """ diracSites = [] result = gConfig.getSections( "/Resources/Sites" ) if not result['OK']: return result gridList = result['Value'] for grid in gridList: result = gConfig.getSections( "/Resources/Sites/%s" % grid ) if not result['OK']: return result sitesList = result['Value'] tmpList = [(site, gConfig.getValue( "/Resources/Sites/%s/%s/Name" % ( grid, site ) ) ) for site in sitesList] diracSites += [dirac for (dirac, goc) in tmpList if goc == gocSiteName] if diracSites: return S_OK( diracSites ) return S_ERROR( "There's no site with GOCDB name = %s in DIRAC CS" % gocSiteName ) def getDIRACSesForSRM( srmService ): result = gConfig.getSections( "/Resources/StorageElements" ) if not result['OK']: return result diracSEs = result['Value'] resultDIRACSEs = [] for se in diracSEs: seSection = "/Resources/StorageElements/%s" % se result = gConfig.getSections( seSection ) if not result['OK']:\ continue accesses = result['Value'] for access in accesses: protocol = gConfig.getValue( cfgPath( seSection, access, 'Protocol'), 'Unknown' ) if protocol == 'srm': seHost = gConfig.getValue( cfgPath( seSection, access, 'Host'), 'Unknown' ) if seHost == srmService: resultDIRACSEs.append( se ) return S_OK( resultDIRACSEs ) #EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#
repo_name: calancha/DIRAC
path: Core/Utilities/SitesDIRACGOCDBmapping.py
language: Python
license: gpl-3.0
size: 2,683
keyword: [ "DIRAC" ]
text_hash: 37267c896bad20469bda991f6910bbadc0c755ae47839ffa031dc1b0487b47a1
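The two helpers above walk the configuration-service tree /Resources/Sites/<GRIDNAME>/<SITENAME>/Name in opposite directions. Below is a toy, DIRAC-free illustration of that lookup; the dictionary contents are hypothetical apart from the 'LCG.CERN.ch' / 'CERN-PROD' pair quoted in the docstrings.

# Toy mock of the CS layout used by getGOCSiteName / getDIRACSiteName.
CS = {
    "/Resources/Sites/LCG/LCG.CERN.ch/Name": "CERN-PROD",
    "/Resources/Sites/LCG/LCG.CNAF.it/Name": "INFN-T1",   # hypothetical entry
}

def goc_site_name(dirac_site):
    grid = dirac_site.split(".")[0]                      # e.g. 'LCG'
    return CS.get("/Resources/Sites/%s/%s/Name" % (grid, dirac_site))

def dirac_site_names(goc_site):
    return [path.split("/")[4] for path, name in CS.items() if name == goc_site]

print(goc_site_name("LCG.CERN.ch"))   # CERN-PROD
print(dirac_site_names("CERN-PROD"))  # ['LCG.CERN.ch']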
#!/usr/bin/env python import math import vtk def get_program_parameters(): import argparse description = 'Display a capped sphere.' epilogue = ''' ''' parser = argparse.ArgumentParser(description=description, epilog=epilogue, formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument('angle', default=90, type=float, nargs='?', help='The length of the arc in degrees from +z to -z in the +x direction in the x-z plane.') parser.add_argument('step', default=1, type=float, nargs='?', help='Step size in degrees.') parser.add_argument('radius', default=1, type=float, nargs='?', help='Radius of the arc.') parser.add_argument('-u', '--uncapped', action='store_true', help='Uncap the sphere.') parser.add_argument('-s', '--show_line', action='store_true', help='Show the line that is rotationally extruded to make the surface.') args = parser.parse_args() return args.angle, args.step, args.radius, args.uncapped, args.show_line def main(): angle, step, radius, uncapped, show_line = get_program_parameters() angle = math.radians(abs(angle)) step = math.radians(abs(step)) radius = abs(radius) # With default settings set this to 45 and you get a bowl with a flat bottom. start = math.radians(90) pts = get_line(angle, step, radius, uncapped, start) # Setup points and lines points = vtk.vtkPoints() lines = vtk.vtkCellArray() for pt in pts: pt_id = points.InsertNextPoint(pt) if pt_id < len(pts) - 1: line = vtk.vtkLine() line.GetPointIds().SetId(0, pt_id) line.GetPointIds().SetId(1, pt_id + 1) lines.InsertNextCell(line) polydata = vtk.vtkPolyData() polydata.SetPoints(points) polydata.SetLines(lines) # Extrude the profile to make the capped sphere extrude = vtk.vtkRotationalExtrusionFilter() extrude.SetInputData(polydata) extrude.SetResolution(60) # Visualize colors = vtk.vtkNamedColors() # To see the line lineMapper = vtk.vtkPolyDataMapper() lineMapper.SetInputData(polydata) lineActor = vtk.vtkActor() lineActor.SetMapper(lineMapper) lineActor.GetProperty().SetLineWidth(4) lineActor.GetProperty().SetColor(colors.GetColor3d("Red")) # To see the surface surfaceMapper = vtk.vtkPolyDataMapper() surfaceMapper.SetInputConnection(extrude.GetOutputPort()) surfaceActor = vtk.vtkActor() surfaceActor.SetMapper(surfaceMapper) surfaceActor.GetProperty().SetColor(colors.GetColor3d('Khaki')) ren = vtk.vtkRenderer() renWin = vtk.vtkRenderWindow() renWin.AddRenderer(ren) iren = vtk.vtkRenderWindowInteractor() iren.SetRenderWindow(renWin) ren.AddActor(surfaceActor) if show_line: ren.AddActor(lineActor) ren.SetBackground(colors.GetColor3d('LightSlateGray')) ren.ResetCamera() ren.GetActiveCamera().Azimuth(0) ren.GetActiveCamera().Elevation(60) ren.ResetCameraClippingRange() renWin.SetSize(600, 600) renWin.Render() renWin.SetWindowName('CappedSphere') iren.Start() def get_line(angle, step, radius, uncapped, start): """ Get the points for a line. :param angle: Length of the arc in degrees. :param step: Step size in degrees. :param radius: Radius of the arc. :param uncapped: True if uncapped. :param start: Starting angle. :return: A vector of points. 
""" precision = 1.0e-6 pts = list() # Do the curved line theta = 0.0 while theta <= angle: x = radius * math.cos(start - theta) z = radius * math.sin(theta - start) if x < 0: x = 0 pts.append((x, 0, z)) break if abs(x) < precision: x = 0 if abs(z) < precision: z = 0 pts.append((x, 0, z)) theta += step if not uncapped: # Drop a perpendicular from the last point to the x-axis if len(pts) > 1: if pts[-1][0] > 0: last_point = pts[-1] num_pts = 10 interval = float(num_pts) / radius for i in range(1, num_pts): x = last_point[0] - i / interval z = last_point[2] if x < 0: x = 0 pts.append((x, 0, z)) break if abs(x) < precision: x = 0 if abs(z) < precision: z = 0 pts.append((x, 0, z)) if pts[-1][0] > precision: pts.append((0, 0, pts[-1][2])) return pts if __name__ == '__main__': main()
repo_name: lorensen/VTKExamples
path: src/Python/Modelling/CappedSphere.py
language: Python
license: apache-2.0
size: 4,832
keyword: [ "VTK" ]
text_hash: 02b652e5bdcf25e516aeae8abc1ac798145407ebb1ea18ca133677505574cfff
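The get_line() docstring above describes the arc profile that vtkRotationalExtrusionFilter sweeps into a surface. Here is a small VTK-free sketch of the same point formula, assuming the example's defaults (90-degree arc, unit radius); the 15-degree step is only to keep the printout short.

# Stand-alone sketch of the profile points that the example extrudes.
import math

def arc_points(angle_deg=90.0, step_deg=15.0, radius=1.0):
    angle, step = math.radians(angle_deg), math.radians(step_deg)
    start = math.radians(90)
    pts, theta = [], 0.0
    while theta <= angle + 1e-9:
        x = radius * math.cos(start - theta)
        z = radius * math.sin(theta - start)
        pts.append((round(x, 3), 0.0, round(z, 3)))
        theta += step
    return pts

print(arc_points())   # 7 points sweeping from (0, 0, -1) to (1, 0, 0)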
# Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""Unit tests for the `iris.config.NetCDF` class."""

# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests

import warnings

import iris.config


class Test(tests.IrisTest):
    def setUp(self):
        self.options = iris.config.NetCDF()

    def test_basic(self):
        self.assertFalse(self.options.conventions_override)

    def test_enabled(self):
        self.options.conventions_override = True
        self.assertTrue(self.options.conventions_override)

    def test_bad_value(self):
        # A bad value should be ignored and replaced with the default value.
        bad_value = "wibble"
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            self.options.conventions_override = bad_value
        self.assertFalse(self.options.conventions_override)
        exp_wmsg = "Attempting to set invalid value {!r}".format(bad_value)
        self.assertRegex(str(w[0].message), exp_wmsg)

    def test__contextmgr(self):
        with self.options.context(conventions_override=True):
            self.assertTrue(self.options.conventions_override)
        self.assertFalse(self.options.conventions_override)


if __name__ == "__main__":
    tests.main()
repo_name: pp-mo/iris
path: lib/iris/tests/unit/config/test_NetCDF.py
language: Python
license: lgpl-3.0
size: 1,478
keyword: [ "NetCDF" ]
text_hash: 59b0290b101d17f79a0dfd97d8d3e907629d30c2284c4fee3c9a6d6a1c728e50
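test__contextmgr above exercises an options object whose context() temporarily overrides a setting and restores it afterwards. Below is a generic sketch of that pattern; it is not iris's implementation, only the behaviour the test expects.

# Minimal options object with a context() override, using contextlib.
from contextlib import contextmanager

class Options:
    def __init__(self):
        self.conventions_override = False

    @contextmanager
    def context(self, **overrides):
        saved = {name: getattr(self, name) for name in overrides}
        try:
            for name, value in overrides.items():
                setattr(self, name, value)
            yield self
        finally:
            for name, value in saved.items():
                setattr(self, name, value)

opts = Options()
with opts.context(conventions_override=True):
    assert opts.conventions_override
assert not opts.conventions_override   # restored on exit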
#!/usr/bin/env python # -*- coding: utf-8 -*- # For bug reports, feature and support requests please visit # <https://github.com/mkalewski/sim2net/issues>. """ sim2net -- simulation application file. If in any doubt, refer to the technical documentations that is available on the Internet: <https://sim2net.readthedocs.org/en/latest/>. """ from sim2net.application import Application class HelloWorld(Application): """ A "Hello World" example with two nodes: the node with ID equal 0 sends a message that should be received and printed by the node with ID equal to 1. (See also the ``configuration.py`` file.) For more information about the methods that follows refer to the technical documentation: """ def initialize(self, node_id, shared): """ Initialization method. """ self.__node_id = node_id print '[node %d] initialize' % self.__node_id def finalize(self, shared): """ Finalization method. """ print '[node %d] finalize' % self.__node_id def failure(self, time, shared): """ This method is called only if the node crashes. """ print ('[node %d] failure @ (%d, %2f)' % (self.__node_id, time[0], time[1])) def main(self, time, communication, neighbors, shared): """ This method is called at each simulation step. """ if self.__node_id == 0 and time[0] == 1: communication.send('Hello World!') while True: msg = communication.receive() if msg is None: break print ('[node %d] message from node %d: "%s"' % (self.__node_id, msg[0], msg[1]))
repo_name: mkalewski/sim2net
path: sim2net/cli/_application_template.py
language: Python
license: mit
size: 1,745
keyword: [ "VisIt" ]
text_hash: 6c694fc99a896468df77c44fbbe0003c7045d769f04497c8a7107869bef0c0c6
# coding=utf-8

from django.test import TestCase

from feedback.models import Person, Veranstaltung, Semester, Kommentar
from feedback.forms import KommentarModelForm


class KommentarModelFormTest(TestCase):
    def setUp(self):
        self.p = []
        self.p.append(Person.objects.create(vorname='Eric', nachname='Idle'))
        self.p.append(Person.objects.create(vorname='John', nachname='Cleese'))

        self.s = Semester.objects.create(semester=20120)
        self.v = Veranstaltung.objects.create(typ='v', name='Life of Brian',
                                              semester=self.s, evaluieren=True,
                                              grundstudium=False)
        self.v.veranstalter.add(self.p[0])

        self.k = Kommentar.objects.create(veranstaltung=self.v, autor=self.p[0],
                                          text='Great!')

    def test_init(self):
        with self.assertRaises(KeyError):
            KommentarModelForm(instance=self.k)

        forms = []
        forms.append(KommentarModelForm(veranstaltung=self.v))
        forms.append(KommentarModelForm(instance=self.k, veranstaltung=self.v))

        for f in forms:
            self.assertCountEqual(f.fields['autor'].queryset.all(), (self.p[0],))
repo_name: d120/pyfeedback
path: src/feedback/tests/test_forms.py
language: Python
license: agpl-3.0
size: 1,166
keyword: [ "Brian" ]
text_hash: df6cea4967381b6d11c5d25ca981cecd40b24edd25a3c06b05e326940938136b
import tensorflow as tf import numpy as np def normalize(v): norm=np.linalg.norm(v) if norm==0: return v return v/norm def readToLines(file): csvFile=open(file) lines=csvFile.read().splitlines() csvFile.close() splitLines=[] for line in lines: splitLines+=[line.split(',')] return splitLines FEATURES=22 NUM_CLASSES=2 hidden1_num_neurons=2 #neurons in first layer output_num_neurons=NUM_CLASSES #neurons in second (output) layer. Each neuron corresponds to a digit. The classification is the order of the #output neuron with the highest activation #function to read MNIST images and labels into numpy matrices def loadData(file): splitLines=readToLines(file) voiceFeatures=np.zeros([len(splitLines)-1, FEATURES]) labels=np.zeros([len(splitLines)-1, NUM_CLASSES]) for dataInd in range(1, len(splitLines)): splitLine=splitLines[dataInd] voiceFeatures[dataInd-1, :16]=splitLine[1:17] voiceFeatures[dataInd-1, 16:]=splitLine[18:] labels[dataInd-1, int(splitLine[17])]=1.0 for col in range(0, len(voiceFeatures[0])): max=0.0 min=float('inf') for row in range(0, len(voiceFeatures)): if(voiceFeatures[row,col]>max): max=voiceFeatures[row,col] if(voiceFeatures[row,col]<min): min=voiceFeatures[row,col] for row in range(0, len(voiceFeatures)): voiceFeatures[row,col]+=min voiceFeatures[row,col]/=(max-min) shuffle=np.random.permutation(len(splitLines)-1) return voiceFeatures[shuffle[0:int(3*(len(splitLines)-1)/4)]], labels[shuffle[0:int(3*(len(splitLines)-1)/4)]], voiceFeatures[shuffle[int(3*(len(splitLines)-1)/4):]], labels[shuffle[int(3*(len(splitLines)-1)/4):]] trainVoiceFeatures, trainLabels, validateVoiceFeatures, validateLabels=loadData('/home/willie/workspace/TensorFlowWorkshop/data/Parkinsons/parkinsons.data') x = tf.placeholder(tf.float32, [None, FEATURES]) W = tf.Variable(tf.zeros([FEATURES, NUM_CLASSES])) b = tf.Variable(tf.zeros([NUM_CLASSES])) y = tf.nn.softmax(tf.matmul(x, W) + b) y_ = tf.placeholder(tf.float32, [None, NUM_CLASSES]) cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1])) train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy) correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) sess = tf.InteractiveSession() tf.initialize_all_variables().run() for _ in range(100): crossEntropy, acc, _=sess.run([cross_entropy, accuracy, train_step], feed_dict={x: trainVoiceFeatures, y_: trainLabels}) print(crossEntropy, acc) print(sess.run(accuracy, feed_dict={x: validateVoiceFeatures, y_: validateLabels}))
repo_name: theoryclub/tf_workshop
path: MNISTExample.py
language: Python
license: mit
size: 2,835
keyword: [ "NEURON" ]
text_hash: ef786d7a8d9ef98abe8f4a0c12d158f7fb2ee3995f7a03e773a0dddd349bf6f1
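loadData() above rescales every feature column by its minimum and maximum before the softmax regression. A vectorised numpy version of the usual min-max scaling is sketched below; as a hedged observation, the loop in the row above adds the column minimum where the conventional formula subtracts it, so the two only agree when the minimum is zero.

# Vectorised min-max scaling; the conventional form is (x - min) / (max - min).
# Assumes no column is constant (otherwise max - min would be zero).
import numpy as np

def min_max_scale(features):
    col_min = features.min(axis=0)
    col_max = features.max(axis=0)
    return (features - col_min) / (col_max - col_min)

x = np.array([[1.0, 10.0], [2.0, 30.0], [3.0, 50.0]])
print(min_max_scale(x))   # each column mapped onto [0, 1]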
''' Copyright (C) 2015 Jacob Bieker, jacob@bieker.us, www.jacobbieker.com This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. ''' __author__ = 'Jacob' import requests import yaml # Authentication with Instagram from insights.io import config if __name__ != "__main__": configuration_files = config.import_yaml_files(".", ["access.yaml"]) access_config = configuration_files[0] else: configuration_files = config.import_yaml_files("..", ["access.yaml"]) acces_config = configuration_files[0] #Based on the get_access_token.py on Instagram's Github client_id = access_config.get('instagram').get('id') client_secret = access_config.get('instagram').get('secret') redirect_uri = access_config.get('instagram').get('redirect') scope = access_config.get('instagram').get('scope') # For basic, API seems to need to be set explicitly if not scope or scope == [""]: scope = ["basic"] authorization_url = "https://api.instagram.com/oauth/authorize/" payload = {"client_id": client_id, "redirect_uri": redirect_uri, "response_type": "token", "scope": scope} api_auth = requests.get(authorization_url, params=payload) api = InstagramAPI(client_id=client_id, client_secret=client_secret, redirect_uri=redirect_uri) #TODO make it so do not have to go to site and then come back print ("Visit this page and authorize access in your browser: " + redirect_uri) code = (str(input("Paste in code in query string after redirect: ").strip())) access_token = api.exchange_code_for_access_token(code) with open("../access.yaml", 'w') as access: access_config.get('instagram').get('token').set(access_token) access.write(yaml.dump(access_config, default_flow_style=False)) api = InstagramAPI(access_token=access_token, client_secret=client_secret) recent_media, next_ = api.user_recent_media(user_id="userid", count=10)
repo_name: jacobbieker/Insights
path: insights/instagram/Instagram2SQLite.py
language: Python
license: gpl-2.0
size: 2,463
keyword: [ "VisIt" ]
text_hash: 58e1cb76b6b74684d96b4e6062bf2b32004d3813d53e488bc76edc776849ce29
################################################################################ # Copyright (C) 2013-2014 Jaakko Luttinen # # This file is licensed under the MIT License. ################################################################################ """ Unit tests for `dot` module. """ import unittest import numpy as np import scipy from numpy import testing from ..dot import Dot, SumMultiply from ..gaussian import Gaussian, GaussianARD from bayespy.nodes import GaussianGamma from ...vmp import VB from bayespy.utils import misc from bayespy.utils import linalg from bayespy.utils import random from bayespy.utils.misc import TestCase class TestSumMultiply(TestCase): def test_parent_validity(self): """ Test that the parent nodes are validated properly in SumMultiply """ V = GaussianARD(1, 1) X = Gaussian(np.ones(1), np.identity(1)) Y = Gaussian(np.ones(3), np.identity(3)) Z = Gaussian(np.ones(5), np.identity(5)) A = SumMultiply(X, ['i']) self.assertEqual(A.dims, ((), ())) A = SumMultiply('i', X) self.assertEqual(A.dims, ((), ())) A = SumMultiply(X, ['i'], ['i']) self.assertEqual(A.dims, ((1,), (1,1))) A = SumMultiply('i->i', X) self.assertEqual(A.dims, ((1,), (1,1))) A = SumMultiply(X, ['i'], Y, ['j'], ['i','j']) self.assertEqual(A.dims, ((1,3), (1,3,1,3))) A = SumMultiply('i,j->ij', X, Y) self.assertEqual(A.dims, ((1,3), (1,3,1,3))) A = SumMultiply(V, [], X, ['i'], Y, ['i'], []) self.assertEqual(A.dims, ((), ())) A = SumMultiply(',i,i->', V, X, Y) self.assertEqual(A.dims, ((), ())) # Gaussian-gamma parents C = GaussianGamma(np.ones(3), np.identity(3), 1, 1) A = SumMultiply(Y, ['i'], C, ['i'], ['i']) self.assertEqual(A.dims, ((3,), (3,3), (), ())) A = SumMultiply('i,i->i', Y, C) self.assertEqual(A.dims, ((3,), (3,3), (), ())) C = GaussianGamma(np.ones(3), np.identity(3), 1, 1) A = SumMultiply(Y, ['i'], C, ['i'], []) self.assertEqual(A.dims, ((), (), (), ())) A = SumMultiply('i,i->', Y, C) self.assertEqual(A.dims, ((), (), (), ())) # Error: not enough inputs self.assertRaises(ValueError, SumMultiply) self.assertRaises(ValueError, SumMultiply, X) # Error: too many keys self.assertRaises(ValueError, SumMultiply, Y, ['i', 'j']) self.assertRaises(ValueError, SumMultiply, 'ij', Y) # Error: not broadcastable self.assertRaises(ValueError, SumMultiply, Y, ['i'], Z, ['i']) self.assertRaises(ValueError, SumMultiply, 'i,i', Y, Z) # Error: output key not in inputs self.assertRaises(ValueError, SumMultiply, X, ['i'], ['j']) self.assertRaises(ValueError, SumMultiply, 'i->j', X) # Error: non-unique input keys self.assertRaises(ValueError, SumMultiply, X, ['i','i']) self.assertRaises(ValueError, SumMultiply, 'ii', X) # Error: non-unique output keys self.assertRaises(ValueError, SumMultiply, X, ['i'], ['i','i']) self.assertRaises(ValueError, SumMultiply, 'i->ii', X) # String has too many '->' self.assertRaises(ValueError, SumMultiply, 'i->i->i', X) # String has too many input nodes self.assertRaises(ValueError, SumMultiply, 'i,i->i', X) # Same parent several times self.assertRaises(ValueError, SumMultiply, 'i,i->i', X, X) # Same parent several times via deterministic node Xh = SumMultiply('i->i', X) self.assertRaises(ValueError, SumMultiply, 'i,i->i', X, Xh) def test_message_to_child(self): """ Test the message from SumMultiply to its children. 
""" def compare_moments(u0, u1, *args): Y = SumMultiply(*args) u_Y = Y.get_moments() self.assertAllClose(u_Y[0], u0) self.assertAllClose(u_Y[1], u1) # Test constant parent y = np.random.randn(2,3,4) compare_moments(y, linalg.outer(y, y, ndim=2), 'ij->ij', y) # Do nothing for 2-D array Y = GaussianARD(np.random.randn(5,2,3), np.random.rand(5,2,3), plates=(5,), shape=(2,3)) y = Y.get_moments() compare_moments(y[0], y[1], 'ij->ij', Y) compare_moments(y[0], y[1], Y, [0,1], [0,1]) # Sum over the rows of a matrix Y = GaussianARD(np.random.randn(5,2,3), np.random.rand(5,2,3), plates=(5,), shape=(2,3)) y = Y.get_moments() mu = np.einsum('...ij->...j', y[0]) cov = np.einsum('...ijkl->...jl', y[1]) compare_moments(mu, cov, 'ij->j', Y) compare_moments(mu, cov, Y, [0,1], [1]) # Inner product of three vectors X1 = GaussianARD(np.random.randn(2), np.random.rand(2), plates=(), shape=(2,)) x1 = X1.get_moments() X2 = GaussianARD(np.random.randn(6,1,2), np.random.rand(6,1,2), plates=(6,1), shape=(2,)) x2 = X2.get_moments() X3 = GaussianARD(np.random.randn(7,6,5,2), np.random.rand(7,6,5,2), plates=(7,6,5), shape=(2,)) x3 = X3.get_moments() mu = np.einsum('...i,...i,...i->...', x1[0], x2[0], x3[0]) cov = np.einsum('...ij,...ij,...ij->...', x1[1], x2[1], x3[1]) compare_moments(mu, cov, 'i,i,i', X1, X2, X3) compare_moments(mu, cov, 'i,i,i->', X1, X2, X3) compare_moments(mu, cov, X1, [9], X2, [9], X3, [9]) compare_moments(mu, cov, X1, [9], X2, [9], X3, [9], []) # Outer product of two vectors X1 = GaussianARD(np.random.randn(2), np.random.rand(2), plates=(5,), shape=(2,)) x1 = X1.get_moments() X2 = GaussianARD(np.random.randn(6,1,2), np.random.rand(6,1,2), plates=(6,1), shape=(2,)) x2 = X2.get_moments() mu = np.einsum('...i,...j->...ij', x1[0], x2[0]) cov = np.einsum('...ik,...jl->...ijkl', x1[1], x2[1]) compare_moments(mu, cov, 'i,j->ij', X1, X2) compare_moments(mu, cov, X1, [9], X2, [7], [9,7]) # Matrix product Y1 = GaussianARD(np.random.randn(3,2), np.random.rand(3,2), plates=(), shape=(3,2)) y1 = Y1.get_moments() Y2 = GaussianARD(np.random.randn(5,2,3), np.random.rand(5,2,3), plates=(5,), shape=(2,3)) y2 = Y2.get_moments() mu = np.einsum('...ik,...kj->...ij', y1[0], y2[0]) cov = np.einsum('...ikjl,...kmln->...imjn', y1[1], y2[1]) compare_moments(mu, cov, 'ik,kj->ij', Y1, Y2) compare_moments(mu, cov, Y1, ['i','k'], Y2, ['k','j'], ['i','j']) # Trace of a matrix product Y1 = GaussianARD(np.random.randn(3,2), np.random.rand(3,2), plates=(), shape=(3,2)) y1 = Y1.get_moments() Y2 = GaussianARD(np.random.randn(5,2,3), np.random.rand(5,2,3), plates=(5,), shape=(2,3)) y2 = Y2.get_moments() mu = np.einsum('...ij,...ji->...', y1[0], y2[0]) cov = np.einsum('...ikjl,...kilj->...', y1[1], y2[1]) compare_moments(mu, cov, 'ij,ji', Y1, Y2) compare_moments(mu, cov, 'ij,ji->', Y1, Y2) compare_moments(mu, cov, Y1, ['i','j'], Y2, ['j','i']) compare_moments(mu, cov, Y1, ['i','j'], Y2, ['j','i'], []) # Vector-matrix-vector product X1 = GaussianARD(np.random.randn(3), np.random.rand(3), plates=(), shape=(3,)) x1 = X1.get_moments() X2 = GaussianARD(np.random.randn(6,1,2), np.random.rand(6,1,2), plates=(6,1), shape=(2,)) x2 = X2.get_moments() Y = GaussianARD(np.random.randn(3,2), np.random.rand(3,2), plates=(), shape=(3,2)) y = Y.get_moments() mu = np.einsum('...i,...ij,...j->...', x1[0], y[0], x2[0]) cov = np.einsum('...ia,...ijab,...jb->...', x1[1], y[1], x2[1]) compare_moments(mu, cov, 'i,ij,j', X1, Y, X2) compare_moments(mu, cov, X1, [1], Y, [1,2], X2, [2]) # Complex sum-product of 0-D, 1-D, 2-D and 3-D arrays V = 
GaussianARD(np.random.randn(7,6,5), np.random.rand(7,6,5), plates=(7,6,5), shape=()) v = V.get_moments() X = GaussianARD(np.random.randn(6,1,2), np.random.rand(6,1,2), plates=(6,1), shape=(2,)) x = X.get_moments() Y = GaussianARD(np.random.randn(3,4), np.random.rand(3,4), plates=(5,), shape=(3,4)) y = Y.get_moments() Z = GaussianARD(np.random.randn(4,2,3), np.random.rand(4,2,3), plates=(6,5), shape=(4,2,3)) z = Z.get_moments() mu = np.einsum('...,...i,...kj,...jik->...k', v[0], x[0], y[0], z[0]) cov = np.einsum('...,...ia,...kjcb,...jikbac->...kc', v[1], x[1], y[1], z[1]) compare_moments(mu, cov, ',i,kj,jik->k', V, X, Y, Z) compare_moments(mu, cov, V, [], X, ['i'], Y, ['k','j'], Z, ['j','i','k'], ['k']) # # Gaussian-gamma parents # # Outer product of vectors X1 = GaussianARD(np.random.randn(2), np.random.rand(2), shape=(2,)) x1 = X1.get_moments() X2 = GaussianGamma( np.random.randn(6,1,2), random.covariance(2), np.random.rand(6,1), np.random.rand(6,1), plates=(6,1) ) x2 = X2.get_moments() Y = SumMultiply('i,j->ij', X1, X2) u = Y._message_to_child() y = np.einsum('...i,...j->...ij', x1[0], x2[0]) yy = np.einsum('...ik,...jl->...ijkl', x1[1], x2[1]) self.assertAllClose(u[0], y) self.assertAllClose(u[1], yy) self.assertAllClose(u[2], x2[2]) self.assertAllClose(u[3], x2[3]) pass def test_message_to_parent(self): """ Test the message from SumMultiply node to its parents. """ data = 2 tau = 3 def check_message(true_m0, true_m1, parent, *args, F=None): if F is None: A = SumMultiply(*args) B = GaussianARD(A, tau) B.observe(data*np.ones(A.plates + A.dims[0])) else: A = F (A_m0, A_m1) = A._message_to_parent(parent) self.assertAllClose(true_m0, A_m0) self.assertAllClose(true_m1, A_m1) pass # Check: different message to each of multiple parents X1 = GaussianARD(np.random.randn(2), np.random.rand(2), ndim=1) x1 = X1.get_moments() X2 = GaussianARD(np.random.randn(2), np.random.rand(2), ndim=1) x2 = X2.get_moments() m0 = tau * data * x2[0] m1 = -0.5 * tau * x2[1] * np.identity(2) check_message(m0, m1, 0, 'i,i->i', X1, X2) check_message(m0, m1, 0, X1, [9], X2, [9], [9]) m0 = tau * data * x1[0] m1 = -0.5 * tau * x1[1] * np.identity(2) check_message(m0, m1, 1, 'i,i->i', X1, X2) check_message(m0, m1, 1, X1, [9], X2, [9], [9]) # Check: key not in output X1 = GaussianARD(np.random.randn(2), np.random.rand(2), ndim=1) x1 = X1.get_moments() m0 = tau * data * np.ones(2) m1 = -0.5 * tau * np.ones((2,2)) check_message(m0, m1, 0, 'i', X1) check_message(m0, m1, 0, 'i->', X1) check_message(m0, m1, 0, X1, [9]) check_message(m0, m1, 0, X1, [9], []) # Check: key not in some input X1 = GaussianARD(np.random.randn(), np.random.rand()) x1 = X1.get_moments() X2 = GaussianARD(np.random.randn(2), np.random.rand(2), ndim=1) x2 = X2.get_moments() m0 = tau * data * np.sum(x2[0], axis=-1) m1 = -0.5 * tau * np.sum(x2[1] * np.identity(2), axis=(-1,-2)) check_message(m0, m1, 0, ',i->i', X1, X2) check_message(m0, m1, 0, X1, [], X2, [9], [9]) m0 = tau * data * x1[0] * np.ones(2) m1 = -0.5 * tau * x1[1] * np.identity(2) check_message(m0, m1, 1, ',i->i', X1, X2) check_message(m0, m1, 1, X1, [], X2, [9], [9]) # Check: keys in different order Y1 = GaussianARD(np.random.randn(3,2), np.random.rand(3,2), ndim=2) y1 = Y1.get_moments() Y2 = GaussianARD(np.random.randn(2,3), np.random.rand(2,3), ndim=2) y2 = Y2.get_moments() m0 = tau * data * y2[0].T m1 = -0.5 * tau * np.einsum('ijlk->jikl', y2[1] * misc.identity(2,3)) check_message(m0, m1, 0, 'ij,ji->ij', Y1, Y2) check_message(m0, m1, 0, Y1, ['i','j'], Y2, ['j','i'], ['i','j']) m0 = tau * data 
* y1[0].T m1 = -0.5 * tau * np.einsum('ijlk->jikl', y1[1] * misc.identity(3,2)) check_message(m0, m1, 1, 'ij,ji->ij', Y1, Y2) check_message(m0, m1, 1, Y1, ['i','j'], Y2, ['j','i'], ['i','j']) # Check: plates when different dimensionality X1 = GaussianARD(np.random.randn(5), np.random.rand(5), shape=(), plates=(5,)) x1 = X1.get_moments() X2 = GaussianARD(np.random.randn(5,3), np.random.rand(5,3), shape=(3,), plates=(5,)) x2 = X2.get_moments() m0 = tau * data * np.sum(np.ones((5,3)) * x2[0], axis=-1) m1 = -0.5 * tau * np.sum(x2[1] * misc.identity(3), axis=(-1,-2)) check_message(m0, m1, 0, ',i->i', X1, X2) check_message(m0, m1, 0, X1, [], X2, ['i'], ['i']) m0 = tau * data * x1[0][:,np.newaxis] * np.ones((5,3)) m1 = -0.5 * tau * x1[1][:,np.newaxis,np.newaxis] * misc.identity(3) check_message(m0, m1, 1, ',i->i', X1, X2) check_message(m0, m1, 1, X1, [], X2, ['i'], ['i']) # Check: other parent's moments broadcasts over plates when node has the # same plates X1 = GaussianARD(np.random.randn(5,4,3), np.random.rand(5,4,3), shape=(3,), plates=(5,4)) x1 = X1.get_moments() X2 = GaussianARD(np.random.randn(3), np.random.rand(3), shape=(3,), plates=(5,4)) x2 = X2.get_moments() m0 = tau * data * np.ones((5,4,3)) * x2[0] m1 = -0.5 * tau * x2[1] * misc.identity(3) check_message(m0, m1, 0, 'i,i->i', X1, X2) check_message(m0, m1, 0, X1, ['i'], X2, ['i'], ['i']) # Check: other parent's moments broadcasts over plates when node does # not have that plate X1 = GaussianARD(np.random.randn(3), np.random.rand(3), shape=(3,), plates=()) x1 = X1.get_moments() X2 = GaussianARD(np.random.randn(3), np.random.rand(3), shape=(3,), plates=(5,4)) x2 = X2.get_moments() m0 = tau * data * np.sum(np.ones((5,4,3)) * x2[0], axis=(0,1)) m1 = -0.5 * tau * np.sum(np.ones((5,4,1,1)) * misc.identity(3) * x2[1], axis=(0,1)) check_message(m0, m1, 0, 'i,i->i', X1, X2) check_message(m0, m1, 0, X1, ['i'], X2, ['i'], ['i']) # Check: other parent's moments broadcasts over plates when the node # only broadcasts that plate X1 = GaussianARD(np.random.randn(3), np.random.rand(3), shape=(3,), plates=(1,1)) x1 = X1.get_moments() X2 = GaussianARD(np.random.randn(3), np.random.rand(3), shape=(3,), plates=(5,4)) x2 = X2.get_moments() m0 = tau * data * np.sum(np.ones((5,4,3)) * x2[0], axis=(0,1), keepdims=True) m1 = -0.5 * tau * np.sum(np.ones((5,4,1,1)) * misc.identity(3) * x2[1], axis=(0,1), keepdims=True) check_message(m0, m1, 0, 'i,i->i', X1, X2) check_message(m0, m1, 0, X1, ['i'], X2, ['i'], ['i']) # Check: broadcasted dimensions X1 = GaussianARD(np.random.randn(1,1), np.random.rand(1,1), ndim=2) x1 = X1.get_moments() X2 = GaussianARD(np.random.randn(3,2), np.random.rand(3,2), ndim=2) x2 = X2.get_moments() m0 = tau * data * np.sum(np.ones((3,2)) * x2[0], keepdims=True) m1 = -0.5 * tau * np.sum(misc.identity(3,2) * x2[1], keepdims=True) check_message(m0, m1, 0, 'ij,ij->ij', X1, X2) check_message(m0, m1, 0, X1, [0,1], X2, [0,1], [0,1]) m0 = tau * data * np.ones((3,2)) * x1[0] m1 = -0.5 * tau * misc.identity(3,2) * x1[1] check_message(m0, m1, 1, 'ij,ij->ij', X1, X2) check_message(m0, m1, 1, X1, [0,1], X2, [0,1], [0,1]) # Check: non-ARD observations X1 = GaussianARD(np.random.randn(2), np.random.rand(2), ndim=1) x1 = X1.get_moments() Lambda = np.array([[2, 1.5], [1.5, 2]]) F = SumMultiply('i->i', X1) Y = Gaussian(F, Lambda) y = np.random.randn(2) Y.observe(y) m0 = np.dot(Lambda, y) m1 = -0.5 * Lambda check_message(m0, m1, 0, 'i->i', X1, F=F) check_message(m0, m1, 0, X1, ['i'], ['i'], F=F) # Check: mask with same shape X1 = 
GaussianARD(np.random.randn(3,2), np.random.rand(3,2), shape=(2,), plates=(3,)) x1 = X1.get_moments() mask = np.array([True, False, True]) F = SumMultiply('i->i', X1) Y = GaussianARD(F, tau, ndim=1) Y.observe(data*np.ones((3,2)), mask=mask) m0 = tau * data * mask[:,np.newaxis] * np.ones(2) m1 = -0.5 * tau * mask[:,np.newaxis,np.newaxis] * np.identity(2) check_message(m0, m1, 0, 'i->i', X1, F=F) check_message(m0, m1, 0, X1, ['i'], ['i'], F=F) # Check: mask larger X1 = GaussianARD(np.random.randn(2), np.random.rand(2), shape=(2,), plates=()) x1 = X1.get_moments() X2 = GaussianARD(np.random.randn(3,2), np.random.rand(3,2), shape=(2,), plates=(3,)) x2 = X2.get_moments() mask = np.array([True, False, True]) F = SumMultiply('i,i->i', X1, X2) Y = GaussianARD(F, tau, plates=(3,), ndim=1) Y.observe(data*np.ones((3,2)), mask=mask) m0 = tau * data * np.sum(mask[:,np.newaxis] * x2[0], axis=0) m1 = -0.5 * tau * np.sum(mask[:,np.newaxis,np.newaxis] * x2[1] * np.identity(2), axis=0) check_message(m0, m1, 0, 'i,i->i', X1, X2, F=F) check_message(m0, m1, 0, X1, ['i'], X2, ['i'], ['i'], F=F) # Check: mask for broadcasted plate X1 = GaussianARD(np.random.randn(2), np.random.rand(2), ndim=1, plates=(1,)) x1 = X1.get_moments() X2 = GaussianARD(np.random.randn(2), np.random.rand(2), ndim=1, plates=(3,)) x2 = X2.get_moments() mask = np.array([True, False, True]) F = SumMultiply('i,i->i', X1, X2) Y = GaussianARD(F, tau, plates=(3,), ndim=1) Y.observe(data*np.ones((3,2)), mask=mask) m0 = tau * data * np.sum(mask[:,np.newaxis] * x2[0], axis=0, keepdims=True) m1 = -0.5 * tau * np.sum(mask[:,np.newaxis,np.newaxis] * x2[1] * np.identity(2), axis=0, keepdims=True) check_message(m0, m1, 0, 'i->i', X1, F=F) check_message(m0, m1, 0, X1, ['i'], ['i'], F=F) # Check: Gaussian-gamma parents X1 = GaussianGamma( np.random.randn(2), random.covariance(2), np.random.rand(), np.random.rand() ) x1 = X1.get_moments() X2 = GaussianGamma( np.random.randn(2), random.covariance(2), np.random.rand(), np.random.rand() ) x2 = X2.get_moments() F = SumMultiply('i,i->i', X1, X2) V = random.covariance(2) y = np.random.randn(2) Y = Gaussian(F, V) Y.observe(y) m0 = np.dot(V, y) * x2[0] m1 = -0.5 * V * x2[1] m2 = -0.5 * np.einsum('i,ij,j', y, V, y) * x2[2]#linalg.inner(V, x2[2], ndim=2) m3 = 0.5 * 2 #linalg.chol_logdet(linalg.chol(V)) + 2*x2[3] m = F._message_to_parent(0) self.assertAllClose(m[0], m0) self.assertAllClose(m[1], m1) self.assertAllClose(m[2], m2) self.assertAllClose(m[3], m3) pass def check_performance(scale=1e2): """ Tests that the implementation of SumMultiply is efficient. This is not a unit test (not run automatically), but rather a performance test, which you may run to test the performance of the node. A naive implementation of SumMultiply will run out of memory in some cases and this method checks that the implementation is not naive but good. """ # Check: Broadcasted plates are computed efficiently # (bad implementation will take a long time to run) s = scale X1 = GaussianARD(np.random.randn(s,s), np.random.rand(s,s), shape=(s,), plates=(s,)) X2 = GaussianARD(np.random.randn(s,1,s), np.random.rand(s,1,s), shape=(s,), plates=(s,1)) F = SumMultiply('i,i', X1, X2) Y = GaussianARD(F, 1) Y.observe(np.ones((s,s))) try: F._message_to_parent(1) except e: print(e) print('SOMETHING BAD HAPPENED') # Check: Broadcasted dimensions are computed efficiently # (bad implementation will run out of memory) pass
repo_name: jluttine/bayespy
path: bayespy/inference/vmp/nodes/tests/test_dot.py
language: Python
license: mit
size: 31,570
keyword: [ "Gaussian" ]
text_hash: eb71725d1d1884d70c9e36b50391afe2044f4bec702478b3b40f0299a55199d5
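The tests above build SumMultiply nodes from einsum-style key strings such as 'i,j->ij' and compare the node's moments against explicit einsum expressions. Here is a plain-numpy sketch of the deterministic case of that identity (no bayespy involved), mirroring the 'outer product of two vectors' check in test_message_to_child.

# First and second moments of an outer product, written as einsum expressions.
import numpy as np

x1 = np.random.randn(2)
x2 = np.random.randn(3)

# first moment: plain outer product
mu = np.einsum('i,j->ij', x1, x2)
assert np.allclose(mu, np.outer(x1, x2))

# second moment (deterministic inputs): <x1 x1>_{ik} <x2 x2>_{jl}
cov = np.einsum('ik,jl->ijkl', np.outer(x1, x1), np.outer(x2, x2))
assert cov.shape == (2, 3, 2, 3)
assert np.allclose(cov, np.einsum('ij,kl->ijkl', mu, mu))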
# Orca # # Copyright 2010 Joanmarie Diggs, Mesar Hameed. # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the # Free Software Foundation, Inc., Franklin Street, Fifth Floor, # Boston MA 02110-1301 USA. """ A list of common keybindings and unbound keys pulled out from default.py: __getDesktopBindings() with the goal of being more readable and less monolithic. """ __id__ = "$Id$" __version__ = "$Revision$" __date__ = "$Date$" __copyright__ = "Copyright (c) 2010 Joanmarie Diggs, Mesar Hameed." __license__ = "LGPL" from . import keybindings # Storing values defaultModifierMask = keybindings.defaultModifierMask ORCA_MODIFIER_MASK = keybindings.ORCA_MODIFIER_MASK NO_MODIFIER_MASK = keybindings.NO_MODIFIER_MASK ORCA_SHIFT_MODIFIER_MASK = keybindings.ORCA_SHIFT_MODIFIER_MASK keymap = ( ("KP_Divide", defaultModifierMask, ORCA_MODIFIER_MASK, "routePointerToItemHandler"), # We want the user to be able to combine modifiers with the # mouse click (e.g. to Shift+Click and select), therefore we # do not "care" about the modifiers -- unless it's the Orca # modifier. # ("KP_Divide", ORCA_MODIFIER_MASK, NO_MODIFIER_MASK, "leftClickReviewItemHandler"), ("KP_Multiply", ORCA_MODIFIER_MASK, NO_MODIFIER_MASK, "rightClickReviewItemHandler"), ("KP_Subtract", defaultModifierMask, NO_MODIFIER_MASK, "toggleFlatReviewModeHandler"), ("KP_Add", defaultModifierMask, NO_MODIFIER_MASK, "sayAllHandler", 1), ("KP_Add", defaultModifierMask, NO_MODIFIER_MASK, "flatReviewSayAllHandler", 2), ("KP_Enter", defaultModifierMask, NO_MODIFIER_MASK, "whereAmIBasicHandler", 1), ("KP_Enter", defaultModifierMask, NO_MODIFIER_MASK, "whereAmIDetailedHandler", 2), ("KP_Enter", defaultModifierMask, ORCA_MODIFIER_MASK, "getTitleHandler", 1), ("KP_Enter", defaultModifierMask, ORCA_MODIFIER_MASK, "getStatusBarHandler", 2), ("KP_Delete", defaultModifierMask, NO_MODIFIER_MASK, "findHandler"), ("KP_Delete", defaultModifierMask, ORCA_MODIFIER_MASK, "findNextHandler"), ("KP_Delete", defaultModifierMask, ORCA_SHIFT_MODIFIER_MASK, "findPreviousHandler"), ("KP_Home", defaultModifierMask, NO_MODIFIER_MASK, "reviewPreviousLineHandler"), ("KP_Home", defaultModifierMask, ORCA_MODIFIER_MASK, "reviewHomeHandler"), ("KP_Up", defaultModifierMask, NO_MODIFIER_MASK, "reviewCurrentLineHandler", 1), ("KP_Up", defaultModifierMask, NO_MODIFIER_MASK, "reviewSpellCurrentLineHandler", 2), ("KP_Up", defaultModifierMask, NO_MODIFIER_MASK, "reviewPhoneticCurrentLineHandler", 3), ("KP_Page_Up", defaultModifierMask, NO_MODIFIER_MASK, "reviewNextLineHandler"), ("KP_Page_Up", defaultModifierMask, ORCA_MODIFIER_MASK, "reviewEndHandler"), ("KP_Left", defaultModifierMask, NO_MODIFIER_MASK, "reviewPreviousItemHandler"), ("KP_Left", defaultModifierMask, ORCA_MODIFIER_MASK, "reviewAboveHandler"), ("KP_Begin", defaultModifierMask, NO_MODIFIER_MASK, "reviewCurrentItemHandler", 1), ("KP_Begin", defaultModifierMask, NO_MODIFIER_MASK, "reviewSpellCurrentItemHandler", 2), ("KP_Begin", defaultModifierMask, 
NO_MODIFIER_MASK, "reviewPhoneticCurrentItemHandler", 3), ("KP_Begin", defaultModifierMask, ORCA_MODIFIER_MASK, "reviewCurrentAccessibleHandler"), ("KP_Right", defaultModifierMask, NO_MODIFIER_MASK, "reviewNextItemHandler"), ("KP_Right", defaultModifierMask, ORCA_MODIFIER_MASK, "reviewBelowHandler"), ("KP_End", defaultModifierMask, NO_MODIFIER_MASK, "reviewPreviousCharacterHandler"), ("KP_End", defaultModifierMask, ORCA_MODIFIER_MASK, "reviewEndOfLineHandler"), ("KP_Down", defaultModifierMask, NO_MODIFIER_MASK, "reviewCurrentCharacterHandler", 1), ("KP_Down", defaultModifierMask, NO_MODIFIER_MASK, "reviewSpellCurrentCharacterHandler", 2), ("KP_Down", defaultModifierMask, NO_MODIFIER_MASK, "reviewUnicodeCurrentCharacterHandler", 3), ("KP_Page_Down", defaultModifierMask, NO_MODIFIER_MASK, "reviewNextCharacterHandler"), )
repo_name: GNOME/orca
path: src/orca/desktop_keyboardmap.py
language: Python
license: lgpl-2.1
size: 4,729
keyword: [ "ORCA" ]
text_hash: 58cbe19e0fc222c7d227e16023eb3f62853dfc01ec96f623a6cf57a92f421a22
from random import random import pyxel SCENE_TITLE = 0 SCENE_PLAY = 1 SCENE_GAMEOVER = 2 STAR_COUNT = 100 STAR_COLOR_HIGH = 12 STAR_COLOR_LOW = 5 PLAYER_WIDTH = 8 PLAYER_HEIGHT = 8 PLAYER_SPEED = 2 BULLET_WIDTH = 2 BULLET_HEIGHT = 8 BULLET_COLOR = 11 BULLET_SPEED = 4 ENEMY_WIDTH = 8 ENEMY_HEIGHT = 8 ENEMY_SPEED = 1.5 BLAST_START_RADIUS = 1 BLAST_END_RADIUS = 8 BLAST_COLOR_IN = 7 BLAST_COLOR_OUT = 10 enemy_list = [] bullet_list = [] blast_list = [] def update_list(list): for elem in list: elem.update() def draw_list(list): for elem in list: elem.draw() def cleanup_list(list): i = 0 while i < len(list): elem = list[i] if not elem.alive: list.pop(i) else: i += 1 class Background: def __init__(self): self.star_list = [] for i in range(STAR_COUNT): self.star_list.append( (random() * pyxel.width, random() * pyxel.height, random() * 1.5 + 1) ) def update(self): for i, (x, y, speed) in enumerate(self.star_list): y += speed if y >= pyxel.height: y -= pyxel.height self.star_list[i] = (x, y, speed) def draw(self): for (x, y, speed) in self.star_list: pyxel.pset(x, y, STAR_COLOR_HIGH if speed > 1.8 else STAR_COLOR_LOW) class Player: def __init__(self, x, y): self.x = x self.y = y self.w = PLAYER_WIDTH self.h = PLAYER_HEIGHT self.alive = True def update(self): if pyxel.btn(pyxel.KEY_LEFT): self.x -= PLAYER_SPEED if pyxel.btn(pyxel.KEY_RIGHT): self.x += PLAYER_SPEED if pyxel.btn(pyxel.KEY_UP): self.y -= PLAYER_SPEED if pyxel.btn(pyxel.KEY_DOWN): self.y += PLAYER_SPEED self.x = max(self.x, 0) self.x = min(self.x, pyxel.width - self.w) self.y = max(self.y, 0) self.y = min(self.y, pyxel.height - self.h) if pyxel.btnp(pyxel.KEY_SPACE): Bullet( self.x + (PLAYER_WIDTH - BULLET_WIDTH) / 2, self.y - BULLET_HEIGHT / 2 ) pyxel.play(0, 0) def draw(self): pyxel.blt(self.x, self.y, 0, 0, 0, self.w, self.h, 0) class Bullet: def __init__(self, x, y): self.x = x self.y = y self.w = BULLET_WIDTH self.h = BULLET_HEIGHT self.alive = True bullet_list.append(self) def update(self): self.y -= BULLET_SPEED if self.y + self.h - 1 < 0: self.alive = False def draw(self): pyxel.rect(self.x, self.y, self.w, self.h, BULLET_COLOR) class Enemy: def __init__(self, x, y): self.x = x self.y = y self.w = ENEMY_WIDTH self.h = ENEMY_HEIGHT self.dir = 1 self.alive = True self.offset = int(random() * 60) enemy_list.append(self) def update(self): if (pyxel.frame_count + self.offset) % 60 < 30: self.x += ENEMY_SPEED self.dir = 1 else: self.x -= ENEMY_SPEED self.dir = -1 self.y += ENEMY_SPEED if self.y > pyxel.height - 1: self.alive = False def draw(self): pyxel.blt(self.x, self.y, 0, 8, 0, self.w * self.dir, self.h, 0) class Blast: def __init__(self, x, y): self.x = x self.y = y self.radius = BLAST_START_RADIUS self.alive = True blast_list.append(self) def update(self): self.radius += 1 if self.radius > BLAST_END_RADIUS: self.alive = False def draw(self): pyxel.circ(self.x, self.y, self.radius, BLAST_COLOR_IN) pyxel.circb(self.x, self.y, self.radius, BLAST_COLOR_OUT) class App: def __init__(self): pyxel.init(120, 160, caption="Pyxel Shooter") pyxel.image(0).set( 0, 0, [ "00c00c00", "0c7007c0", "0c7007c0", "c703b07c", "77033077", "785cc587", "85c77c58", "0c0880c0", ], ) pyxel.image(0).set( 8, 0, [ "00088000", "00ee1200", "08e2b180", "02882820", "00222200", "00012280", "08208008", "80008000", ], ) pyxel.sound(0).set("a3a2c1a1", "p", "7", "s", 5) pyxel.sound(1).set("a3a2c2c2", "n", "7742", "s", 10) self.scene = SCENE_TITLE self.score = 0 self.background = Background() self.player = Player(pyxel.width / 2, pyxel.height - 20) pyxel.run(self.update, 
self.draw) def update(self): if pyxel.btnp(pyxel.KEY_Q): pyxel.quit() self.background.update() if self.scene == SCENE_TITLE: self.update_title_scene() elif self.scene == SCENE_PLAY: self.update_play_scene() elif self.scene == SCENE_GAMEOVER: self.update_gameover_scene() def update_title_scene(self): if pyxel.btnp(pyxel.KEY_ENTER): self.scene = SCENE_PLAY def update_play_scene(self): if pyxel.frame_count % 6 == 0: Enemy(random() * (pyxel.width - PLAYER_WIDTH), 0) for a in enemy_list: for b in bullet_list: if ( a.x + a.w > b.x and b.x + b.w > a.x and a.y + a.h > b.y and b.y + b.h > a.y ): a.alive = False b.alive = False blast_list.append( Blast(a.x + ENEMY_WIDTH / 2, a.y + ENEMY_HEIGHT / 2) ) pyxel.play(1, 1) self.score += 10 for enemy in enemy_list: if ( self.player.x + self.player.w > enemy.x and enemy.x + enemy.w > self.player.x and self.player.y + self.player.h > enemy.y and enemy.y + enemy.h > self.player.y ): enemy.alive = False # 自機の爆発を生成する blast_list.append( Blast( self.player.x + PLAYER_WIDTH / 2, self.player.y + PLAYER_HEIGHT / 2, ) ) pyxel.play(1, 1) self.scene = SCENE_GAMEOVER self.player.update() update_list(bullet_list) update_list(enemy_list) update_list(blast_list) cleanup_list(enemy_list) cleanup_list(bullet_list) cleanup_list(blast_list) def update_gameover_scene(self): update_list(bullet_list) update_list(enemy_list) update_list(blast_list) cleanup_list(enemy_list) cleanup_list(bullet_list) cleanup_list(blast_list) if pyxel.btnp(pyxel.KEY_ENTER): self.scene = SCENE_PLAY self.player.x = pyxel.width / 2 self.player.y = pyxel.height - 20 self.score = 0 enemy_list.clear() bullet_list.clear() blast_list.clear() def draw(self): pyxel.cls(0) self.background.draw() if self.scene == SCENE_TITLE: self.draw_title_scene() elif self.scene == SCENE_PLAY: self.draw_play_scene() elif self.scene == SCENE_GAMEOVER: self.draw_gameover_scene() pyxel.text(39, 4, "SCORE {:5}".format(self.score), 7) def draw_title_scene(self): pyxel.text(35, 66, "Pyxel Shooter", pyxel.frame_count % 16) pyxel.text(31, 126, "- PRESS ENTER -", 13) def draw_play_scene(self): self.player.draw() draw_list(bullet_list) draw_list(enemy_list) draw_list(blast_list) def draw_gameover_scene(self): draw_list(bullet_list) draw_list(enemy_list) draw_list(blast_list) pyxel.text(43, 66, "GAME OVER", 8) pyxel.text(31, 126, "- PRESS ENTER -", 13) App()
repo_name: ferriman/SSandSP
path: pyxel-test/venv/lib/python3.8/site-packages/pyxel/examples/09_shooter.py
language: Python
license: gpl-3.0
size: 8,375
keyword: [ "BLAST" ]
text_hash: e8a274368ac8ad80df79d7b8cea389cf29e7a8bc07037b473e6d9b9c94495781
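Both collision checks in update_play_scene() above are the standard axis-aligned bounding-box overlap test written out inline for bullet/enemy and player/enemy pairs. The same test as a small stand-alone helper:

# Axis-aligned bounding-box overlap, matching the inline condition above.
def aabb_overlap(ax, ay, aw, ah, bx, by, bw, bh):
    """True if rectangles A and B overlap (edges merely touching do not count)."""
    return (ax + aw > bx and bx + bw > ax and
            ay + ah > by and by + bh > ay)

assert aabb_overlap(0, 0, 8, 8, 4, 4, 8, 8)        # overlapping sprites collide
assert not aabb_overlap(0, 0, 8, 8, 8, 0, 8, 8)    # just touching -> no hit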
#! /usr/bin/env python3 # # Copyright (C) 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021 David Maxwell and Constantine Khroulev # # This file is part of PISM. # # PISM is free software; you can redistribute it and/or modify it under the # terms of the GNU General Public License as published by the Free Software # Foundation; either version 3 of the License, or (at your option) any later # version. # # PISM is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more # details. # # You should have received a copy of the GNU General Public License # along with PISM; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA # try to start coverage try: # pragma: no cover import coverage cov = coverage.coverage(branch=True) try: # try to load coverage data and ignore failures cov.load() except: pass cov.start() except ImportError: # pragma: no cover pass import PISM import PISM.invert.ssa from PISM.logging import logMessage from PISM.util import convert import numpy as np import sys, os, math class SSAForwardRun(PISM.invert.ssa.SSAForwardRunFromInputFile): def write(self, filename, append=False): if not append: PISM.invert.ssa.SSAForwardRunFromInputFile.write(self, filename) else: grid = self.grid vecs = self.modeldata.vecs pio = PISM.File(grid.com, filename, PISM.PISM_NETCDF3, PISM.PISM_READWRITE) self.modeldata.vecs.write(filename) pio.close() class InvSSAPlotListener(PISM.invert.listener.PlotListener): def __init__(self, grid, Vmax): PISM.invert.listener.PlotListener.__init__(self, grid) self.Vmax = Vmax self.l2_weight = None self.l2_weight_init = False def __call__(self, inverse_solver, count, data): if not self.l2_weight_init: vecs = inverse_solver.ssarun.modeldata.vecs if vecs.has('vel_misfit_weight'): self.l2_weight = self.toproczero(vecs.vel_misfit_weight) self.l2_weight_init = True method = inverse_solver.method r = self.toproczero(data.residual) Td = None if 'T_zeta_step' in data: Td = self.toproczero(data.T_zeta_step) TStarR = None if 'TStar_residual' in data: TStarR = self.toproczero(data.TStar_residual) d = None if 'zeta_step' in data: d = self.toproczero(data.zeta_step) zeta = self.toproczero(data.zeta) secpera = convert(1.0, "year", "second") if self.grid.rank() == 0: import matplotlib.pyplot as pp pp.figure(self.figure()) l2_weight = self.l2_weight pp.clf() V = self.Vmax pp.subplot(2, 3, 1) if l2_weight is not None: rx = l2_weight * r[0, :, :] * secpera else: rx = r[0, :, :] * secpera rx = np.maximum(rx, -V) rx = np.minimum(rx, V) pp.imshow(rx, origin='lower', interpolation='nearest') pp.colorbar() pp.title('r_x') pp.jet() pp.subplot(2, 3, 4) if l2_weight is not None: ry = l2_weight * r[1, :, :] * secpera else: ry = r[1, :, :] * secpera ry = np.maximum(ry, -V) ry = np.minimum(ry, V) pp.imshow(ry, origin='lower', interpolation='nearest') pp.colorbar() pp.title('r_y') pp.jet() if method == 'ign': pp.subplot(2, 3, 2) Tdx = Td[0, :, :] * secpera pp.imshow(Tdx, origin='lower', interpolation='nearest') pp.colorbar() pp.title('Td_x') pp.jet() pp.subplot(2, 3, 5) Tdy = Td[1, :, :] * secpera pp.imshow(Tdy, origin='lower', interpolation='nearest') pp.colorbar() pp.title('Td_y') pp.jet() elif method == 'sd' or method == 'nlcg': pp.subplot(2, 3, 2) pp.imshow(TStarR, origin='lower', interpolation='nearest') pp.colorbar() pp.title('TStarR') pp.jet() if d is not None: d *= 
-1 pp.subplot(2, 3, 3) pp.imshow(d, origin='lower', interpolation='nearest') # colorbar does a divide by zero if 'd' is all zero, # as it will be at the start of iteration zero. # The warning message is a distraction, so we suppress it. import warnings with warnings.catch_warnings(): warnings.simplefilter("ignore") pp.colorbar() pp.jet() pp.title('-zeta_step') pp.subplot(2, 3, 6) pp.imshow(zeta, origin='lower', interpolation='nearest') pp.colorbar() pp.jet() pp.title('zeta') pp.ion() pp.draw() pp.show() class InvSSALinPlotListener(PISM.invert.listener.PlotListener): def __init__(self, grid, Vmax): PISM.invert.listener.PlotListener.__init__(self, grid) self.Vmax = Vmax self.l2_weight = None self.l2_weight_init = False def __call__(self, inverse_solver, count, data): # On the first go-around, extract the l2_weight vector onto # processor zero. if self.l2_weight_init == False: vecs = inverse_solver.ssarun.modeldata.vecs self.l2_weight = self.toproczero(vecs.vel_misfit_weight) self.l2_init = True l2_weight = self.l2_weight r = self.toproczero(data.r) d = self.toproczero(data.d) if self.grid.rank() == 0: import matplotlib.pyplot as pp pp.figure(self.figure()) pp.clf() V = self.Vmax pp.subplot(1, 3, 1) rx = l2_weight * r[0, :, :] rx = np.maximum(rx, -V) rx = np.minimum(rx, V) pp.imshow(rx, origin='lower', interpolation='nearest') pp.colorbar() pp.title('ru') pp.jet() pp.subplot(1, 3, 2) ry = l2_weight * r[1, :, :] ry = np.maximum(ry, -V) ry = np.minimum(ry, V) pp.imshow(ry, origin='lower', interpolation='nearest') pp.colorbar() pp.title('rv') pp.jet() d *= -1 pp.subplot(1, 3, 3) pp.imshow(d, origin='lower', interpolation='nearest') pp.colorbar() pp.jet() pp.title('-d') pp.ion() pp.show() def adjustTauc(mask, tauc): """Where ice is floating or land is ice-free, tauc should be adjusted to have some preset default values.""" logMessage(" Adjusting initial estimate of 'tauc' to match PISM model for floating ice and ice-free bedrock.\n") grid = mask.grid() high_tauc = grid.ctx().config().get_number("basal_yield_stress.ice_free_bedrock") with PISM.vec.Access(comm=tauc, nocomm=mask): for (i, j) in grid.points(): if mask.ocean(i, j): tauc[i, j] = 0 elif mask.ice_free(i, j): tauc[i, j] = high_tauc def createDesignVec(grid, design_var, name=None, **kwargs): if name is None: name = design_var if design_var == "tauc": design_vec = PISM.model.createYieldStressVec(grid, name=name, **kwargs) elif design_var == "hardav": design_vec = PISM.model.createAveragedHardnessVec(grid, name=name, **kwargs) else: raise ValueError("Unknown design variable %s" % design_var) return design_vec # Main code starts here def run(): context = PISM.Context() config = context.config com = context.com PISM.set_abort_on_sigint(True) WIDE_STENCIL = int(config.get_number("grid.max_stencil_width")) usage = \ """ pismi.py [-i IN.nc [-o OUT.nc]]/[-a INOUT.nc] [-inv_data inv_data.nc] [-inv_forward model] [-inv_design design_var] [-inv_method meth] where: -i IN.nc is input file in NetCDF format: contains PISM-written model state -o OUT.nc is output file in NetCDF format to be overwritten -a INOUT.nc is input/output file in NetCDF format to be appended to -inv_data inv_data.nc is data file containing extra inversion data (e.g. 
observed surface velocities) -inv_forward model forward model: only 'ssa' supported -inv_design design_var design variable name; one of 'tauc'/'hardav' for SSA inversions -inv_method meth algorithm for inversion [sd,nlcg,ign,tikhonov_lmvm] notes: * only one of -i/-a is allowed; both specify the input file * only one of -o/-a is allowed; both specify the output file * if -o is used, only the variables involved in inversion are written to the output file. * if -a is used, the variables involved in inversion are appended to the given file. No original variables in the file are changed. """ append_mode = False input_filename = config.get_string("input.file") if len(input_filename) == 0: input_filename = None append = PISM.OptionString("-a", "append file") append_filename = append.value() if append.is_set() else None output_filename = config.get_string("output.file_name") if len(output_filename) == 0: output_filename = None if (input_filename is None) and (append_filename is None): PISM.verbPrintf(1, com, "\nError: No input file specified. Use one of -i [file.nc] or -a [file.nc].\n") sys.exit(0) if (input_filename is not None) and (append_filename is not None): PISM.verbPrintf(1, com, "\nError: Only one of -i/-a is allowed.\n") sys.exit(0) if (output_filename is not None) and (append_filename is not None): PISM.verbPrintf(1, com, "\nError: Only one of -a/-o is allowed.\n") sys.exit(0) if append_filename is not None: input_filename = append_filename output_filename = append_filename append_mode = True inv_data_filename = PISM.OptionString("-inv_data", "inverse data file", input_filename).value() do_plotting = PISM.OptionBool("-inv_plot", "perform visualization during the computation") do_final_plot = PISM.OptionBool("-inv_final_plot", "perform visualization at the end of the computation") Vmax = PISM.OptionReal(context.unit_system, "-inv_plot_vmax", "maximum velocity for plotting residuals", "m / year", 30) design_var = PISM.OptionKeyword("-inv_ssa", "design variable for inversion", "tauc,hardav", "tauc").value() do_pause = PISM.OptionBool("-inv_pause", "pause each iteration") do_restart = PISM.OptionBool("-inv_restart", "Restart a stopped computation.") use_design_prior = config.get_flag("inverse.use_design_prior") prep_module = PISM.OptionString("-inv_prep_module", "Python module used to do final setup of inverse solver") prep_module = prep_module.value() if prep_module.is_set() else None is_regional = PISM.OptionBool("-regional", "Compute SIA/SSA using regional model semantics") using_zeta_fixed_mask = config.get_flag("inverse.use_zeta_fixed_mask") inv_method = config.get_string("inverse.ssa.method") if output_filename is None: output_filename = "pismi_" + os.path.basename(input_filename) saving_inv_data = (inv_data_filename != output_filename) forward_run = SSAForwardRun(input_filename, inv_data_filename, design_var) forward_run.setup() design_param = forward_run.designVariableParameterization() solver = PISM.invert.ssa.createInvSSASolver(forward_run) modeldata = forward_run.modeldata vecs = modeldata.vecs grid = modeldata.grid # Determine the prior guess for tauc/hardav. 
This can be one of # a) tauc/hardav from the input file (default) # b) tauc/hardav_prior from the inv_datafile if -inv_use_design_prior is set design_prior = createDesignVec(grid, design_var, '%s_prior' % design_var) long_name = design_prior.metadata().get_string("long_name") units = design_prior.metadata().get_string("units") design_prior.set_attrs("", "best prior estimate for %s (used for inversion)" % long_name, units, units, "", 0) if PISM.util.fileHasVariable(inv_data_filename, "%s_prior" % design_var) and use_design_prior: PISM.logging.logMessage(" Reading '%s_prior' from inverse data file %s.\n" % (design_var, inv_data_filename)) design_prior.regrid(inv_data_filename, critical=True) vecs.add(design_prior, writing=saving_inv_data) else: if not PISM.util.fileHasVariable(input_filename, design_var): PISM.verbPrintf(1, com, "Initial guess for design variable is not available as '%s' in %s.\nYou can provide an initial guess in the inverse data file.\n" % ( design_var, input_filename)) exit(1) PISM.logging.logMessage("Reading '%s_prior' from '%s' in input file.\n" % (design_var, design_var)) design = createDesignVec(grid, design_var) design.regrid(input_filename, True) design_prior.copy_from(design) vecs.add(design_prior, writing=True) if using_zeta_fixed_mask: if PISM.util.fileHasVariable(inv_data_filename, "zeta_fixed_mask"): zeta_fixed_mask = PISM.model.createZetaFixedMaskVec(grid) zeta_fixed_mask.regrid(inv_data_filename) vecs.add(zeta_fixed_mask) else: if design_var == 'tauc': logMessage( " Computing 'zeta_fixed_mask' (i.e. locations where design variable '%s' has a fixed value).\n" % design_var) zeta_fixed_mask = PISM.model.createZetaFixedMaskVec(grid) zeta_fixed_mask.set(1) mask = vecs.mask with PISM.vec.Access(comm=zeta_fixed_mask, nocomm=mask): for (i, j) in grid.points(): if mask.grounded_ice(i, j): zeta_fixed_mask[i, j] = 0 vecs.add(zeta_fixed_mask) adjustTauc(vecs.mask, design_prior) elif design_var == 'hardav': PISM.logging.logPrattle( "Skipping 'zeta_fixed_mask' for design variable 'hardav'; no natural locations to fix its value.") pass else: raise NotImplementedError("Unable to build 'zeta_fixed_mask' for design variable %s.", design_var) # Convert design_prior -> zeta_prior zeta_prior = PISM.IceModelVec2S(grid, "zeta_prior", PISM.WITH_GHOSTS, WIDE_STENCIL) design_param.convertFromDesignVariable(design_prior, zeta_prior) vecs.add(zeta_prior, writing=True) # Determine the initial guess for zeta. If we are restarting, load it from # the output file. Otherwise, if 'zeta_inv' is in the inverse data file, use it. # If none of the above, copy from 'zeta_prior'. zeta = PISM.IceModelVec2S(grid, "zeta_inv", PISM.WITH_GHOSTS, WIDE_STENCIL) zeta.set_attrs("diagnostic", "zeta_inv", "1", "1", "zeta_inv", 0) if do_restart: # Just to be sure, verify that we have a 'zeta_inv' in the output file. 
if not PISM.util.fileHasVariable(output_filename, 'zeta_inv'): PISM.verbPrintf( 1, com, "Unable to restart computation: file %s is missing variable 'zeta_inv'", output_filename) exit(1) PISM.logging.logMessage(" Inversion starting from 'zeta_inv' found in %s\n" % output_filename) zeta.regrid(output_filename, True) elif PISM.util.fileHasVariable(inv_data_filename, 'zeta_inv'): PISM.logging.logMessage(" Inversion starting from 'zeta_inv' found in %s\n" % inv_data_filename) zeta.regrid(inv_data_filename, True) else: zeta.copy_from(zeta_prior) vel_ssa_observed = None vel_ssa_observed = PISM.model.create2dVelocityVec(grid, '_ssa_observed', stencil_width=2) if PISM.util.fileHasVariable(inv_data_filename, "u_ssa_observed"): vel_ssa_observed.regrid(inv_data_filename, True) vecs.add(vel_ssa_observed, writing=saving_inv_data) else: if not PISM.util.fileHasVariable(inv_data_filename, "u_surface_observed"): PISM.verbPrintf( 1, context.com, "Neither u/v_ssa_observed nor u/v_surface_observed is available in %s.\nAt least one must be specified.\n" % inv_data_filename) exit(1) vel_surface_observed = PISM.model.create2dVelocityVec(grid, '_surface_observed', stencil_width=2) vel_surface_observed.regrid(inv_data_filename, True) vecs.add(vel_surface_observed, writing=saving_inv_data) sia_solver = PISM.SIAFD if is_regional: sia_solver = PISM.SIAFD_Regional vel_sia_observed = PISM.sia.computeSIASurfaceVelocities(modeldata, sia_solver) vel_sia_observed.metadata(0).set_name('u_sia_observed') vel_sia_observed.metadata(0).set_string('long_name', "x-component of the 'observed' SIA velocities") vel_sia_observed.metadata(1).set_name('v_sia_observed') vel_sia_observed.metadata(1).set_string('long_name', "y-component of the 'observed' SIA velocities") vel_ssa_observed.copy_from(vel_surface_observed) vel_ssa_observed.add(-1, vel_sia_observed) vecs.add(vel_ssa_observed, writing=True) # If the inverse data file has a variable tauc/hardav_true, this is probably # a synthetic inversion. We'll load it now so that it will get written # out, if needed, at the end of the computation in the output file. if PISM.util.fileHasVariable(inv_data_filename, "%s_true" % design_var): design_true = createDesignVec(grid, design_var, '%s_true' % design_var) design_true.regrid(inv_data_filename, True) try: f = PISM.File(com, inv_data_filename, PISM.PISM_NETCDF3, PISM.PISM_READONLY) PISM.read_attributes(f, design_true.get_name(), design_true.metadata()) finally: f.close() vecs.add(design_true, writing=saving_inv_data) # Establish a logger which will save logging messages to the output file. message_logger = PISM.logging.CaptureLogger(output_filename, 'pismi_log') PISM.logging.add_logger(message_logger) if append_mode or do_restart: message_logger.readOldLog() # Prep the output file from the grid so that we can save zeta to it during the runs. if not append_mode: pio = PISM.util.prepare_output(output_filename) pio.close() zeta.write(output_filename) # Log the command line to the output file now so that we have a record of # what was attempted PISM.util.writeProvenance(output_filename) # Attach various iteration listeners to the solver as needed for: # Iteration report. solver.addIterationListener(PISM.invert.ssa.printIteration) # Misfit reporting/logging. 
misfit_logger = PISM.invert.ssa.MisfitLogger() solver.addIterationListener(misfit_logger) if inv_method.startswith('tikhonov'): solver.addIterationListener(PISM.invert.ssa.printTikhonovProgress) # Saving the current iteration solver.addDesignUpdateListener(PISM.invert.ssa.ZetaSaver(output_filename)) # Plotting if do_plotting: solver.addIterationListener(InvSSAPlotListener(grid, Vmax)) if solver.method == 'ign': solver.addLinearIterationListener(InvSSALinPlotListener(grid, Vmax)) # Solver is set up. Give the user's prep module a chance to do any final # setup. if prep_module is not None: if prep_module.endswith(".py"): prep_module = prep_module[0:-3] exec("import %s as user_prep_module" % prep_module) user_prep_module.prep_solver(solver) # Pausing (add this after the user's listeners) if do_pause: solver.addIterationListener(PISM.invert.listener.pauseListener) # Run the inverse solver! if do_restart: PISM.logging.logMessage('************** Restarting inversion. ****************\n') else: PISM.logging.logMessage('============== Starting inversion. ==================\n') # Try solving reason = solver.solveInverse(zeta_prior, vel_ssa_observed, zeta) if reason.failed(): PISM.logging.logError("Inverse solve FAILURE:\n%s\n" % reason.nested_description(1)) quit() PISM.logging.logMessage("Inverse solve success (%s)!\n" % reason.description()) (zeta, u) = solver.inverseSolution() # It may be that a 'tauc'/'hardav' was read in earlier. We replace it with # our newly generated one. if vecs.has(design_var): design = vecs.get(design_var) design_param.convertToDesignVariable(zeta, design) else: # Convert back from zeta to tauc or hardav design = createDesignVec(grid, design_var) design_param.convertToDesignVariable(zeta, design) vecs.add(design, writing=True) vecs.add(zeta, writing=True) u.metadata(0).set_name("u_ssa_inv") u.metadata(0).set_string("long_name", "x-component of SSA velocity computed by inversion") u.metadata(1).set_name("v_ssa_inv") u.metadata(1).set_string("long_name", "y-component of SSA velocity computed by inversion") vecs.add(u, writing=True) residual = PISM.model.create2dVelocityVec(grid, name='_inv_ssa_residual') residual.copy_from(u) residual.add(-1, vel_ssa_observed) r_mag = PISM.IceModelVec2S(grid, "inv_ssa_residual", PISM.WITHOUT_GHOSTS, 0) r_mag.set_attrs("diagnostic", "magnitude of mismatch between observed surface velocities and their reconstruction by inversion", "m s-1", "m year-1", "inv_ssa_residual", 0) r_mag.metadata().set_number("_FillValue", convert(-0.01, 'm/year', 'm/s')) r_mag.metadata().set_number("valid_min", 0.0) PISM.compute_magnitude(residual, r_mag) PISM.apply_mask(vecs.land_ice_thickness, 0.0, r_mag) vecs.add(residual, writing=True) vecs.add(r_mag, writing=True) # Write solution out to netcdf file (always append because the file was created already) forward_run.write(output_filename, append=True) # If we're not in append mode, the previous command just nuked # the output file. So we rewrite the siple log. if not append_mode: message_logger.write(output_filename) # Save the misfit history misfit_logger.write(output_filename) if __name__ == "__main__": run() # try to stop coverage and save a report: try: # pragma: no cover cov.stop() report = PISM.OptionBool("-report_coverage", "save coverage information and a report") if report: cov.save() cov.html_report(include=["pismi.py"], directory="pismi_coverage") except: # pragma: no cover pass
pism/pism
examples/inverse/pismi.py
Python
gpl-3.0
23714
[ "NetCDF" ]
8ef5ca881c70bc61ec0547e9aee89d0d80c40a0e40d7a8919364f8360cd6be5c
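The pismi.py row above relies on PISM's IceModelVec objects for two small pieces of arithmetic: deriving "observed" SSA velocities by subtracting the modeled SIA velocities from the observed surface velocities, and computing the magnitude of the inversion residual with a fill value where there is no ice. Below is a minimal NumPy sketch of those two steps; the array shapes, function names and the -0.01 fill value are illustrative only and are not the PISM API.

import numpy as np


def ssa_observed(u_surface, u_sia):
    """Observed SSA velocity = observed surface velocity minus modeled SIA velocity.

    Both arguments hold x/y components in an array of shape (2, ny, nx).
    """
    return u_surface - u_sia


def residual_magnitude(u_inv, u_obs, ice_thickness, fill_value=-0.01):
    """Magnitude of (u_inv - u_obs), set to fill_value where there is no ice."""
    r = u_inv - u_obs
    mag = np.hypot(r[0], r[1])
    return np.where(ice_thickness > 0.0, mag, fill_value)


# Toy usage on a 3x4 grid with made-up numbers.
u_surf = np.random.rand(2, 3, 4)
u_sia = 0.1 * np.random.rand(2, 3, 4)
u_obs = ssa_observed(u_surf, u_sia)
print(residual_magnitude(u_surf, u_obs, ice_thickness=np.ones((3, 4))))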
import os import unittest import datetime import tempfile import numpy as np from nansat.utils import gdal from netCDF4 import Dataset from nansat.mappers.mapper_netcdf_cf import Mapper from mock import patch, Mock, DEFAULT class NetCDF_CF_Tests(unittest.TestCase): def setUp(self): fd, self.tmp_filename = tempfile.mkstemp(suffix='.nc') ds = Dataset(self.tmp_filename, 'w') lat_sz = 30 lon_sz = 20 height_sz = 10 # Set dimensions ds.createDimension('latitude', lat_sz) ds.createDimension('longitude', lon_sz) ds.createDimension('time', 3) ds.createDimension('pressure', 7) ds.createDimension('height', height_sz) ds.createDimension('dimension_rgb', 3) # intentionally no variable added.. # Set variables # 1d "dimensional" variables i.e lats, times, etc. times = ds.createVariable('time', 'i4', ('time')) times.units = 'seconds since 1970-01-01 00:00' times.standard_name = 'time' times.long_name = 'time' times[0] = np.ma.MaskedArray(data = 1560610800.0, mask = False, fill_value = 1e+20) times[1] = np.ma.MaskedArray(data = 1560621600.0, mask = False, fill_value = 1e+20) times[2] = np.ma.MaskedArray(data = 1560632400.0, mask = False, fill_value = 1e+20) heights = ds.createVariable('height', 'i4', ('height')) heights[:] = np.linspace(10, 100, height_sz) lats = ds.createVariable('latitude', 'i4', ('latitude')) lats[:] = np.linspace(0, 60, lat_sz) lons = ds.createVariable('longitude', 'i4', ('longitude')) lons[:] = np.linspace(0, 20, lon_sz) # Spatial variables 2d, 3d, and 4d var2d = ds.createVariable('var2d', 'i4', ('latitude', 'longitude')) var3d = ds.createVariable('var3d', 'i4', ('time', 'latitude', 'longitude')) var3d.standard_name = 'x_wind' var4d = ds.createVariable('var4d', 'f4', ('time', 'pressure', 'latitude', 'longitude')) var4d.standard_name = 'x_wind' var5d = ds.createVariable('var5d', 'f4', ('time', 'pressure', 'height', 'latitude', 'longitude')) var5d.standard_name = 'x_wind' # gdal should read this as several bands of shape (longitude,pressure)=(20, 7) buggy_var = ds.createVariable('buggy_var', 'f4', ('time', 'latitude', 'longitude', 'pressure')) buggy_var.standard_name = 'x_wind' # A variable with a dimension that is not itself added as a variable rgb_var = ds.createVariable('rgb_var', 'f4', ('dimension_rgb', 'latitude', 'longitude')) pressures = ds.createVariable('pressure', 'i4', ('pressure')) pressures.standard_name = 'air_pressure' pressures.description = 'pressure' pressures.long_name = 'pressure' pressures.positive = 'down' pressures.units = 'hPa' pressures[0] = np.ma.MaskedArray(data = 200., mask = False, fill_value = 1e+20) pressures[1] = np.ma.MaskedArray(data = 250., mask = False, fill_value = 1e+20) pressures[2] = np.ma.MaskedArray(data = 300., mask = False, fill_value = 1e+20) pressures[3] = np.ma.MaskedArray(data = 400., mask = False, fill_value = 1e+20) pressures[4] = np.ma.MaskedArray(data = 500., mask = False, fill_value = 1e+20) pressures[5] = np.ma.MaskedArray(data = 700., mask = False, fill_value = 1e+20) pressures[6] = np.ma.MaskedArray(data = 800., mask = False, fill_value = 1e+20) ds.close() os.close(fd) # Just in case - see https://www.logilab.org/blogentry/17873 fd, self.tmp_filename_xy = tempfile.mkstemp(suffix='.nc') ds = Dataset(self.tmp_filename_xy, 'w') y_sz = 30 x_sz = 20 # Set dimensions ds.createDimension('y', y_sz) ds.createDimension('x', x_sz) ds.createDimension('some_times', 3) # Set variables # 1d "dimensional" variables i.e lats, times, etc. 
times = ds.createVariable('some_times', 'i4', ('some_times')) times.units = 'seconds since 1970-01-01 00:00' times.standard_name = 'time' times.long_name = 'time' times[0] = np.ma.MaskedArray(data = 1560610800.0, mask = False, fill_value = 1e+20) times[1] = np.ma.MaskedArray(data = 1560621600.0, mask = False, fill_value = 1e+20) times[2] = np.ma.MaskedArray(data = 1560632400.0, mask = False, fill_value = 1e+20) ys = ds.createVariable('y', 'i4', ('y')) ys[:] = np.linspace(0, 60, y_sz) xs = ds.createVariable('x', 'i4', ('x')) xs[:] = np.linspace(0, 20, x_sz) # Spatial variables 2d and 3d var2d = ds.createVariable('var2d', 'i4', ('y', 'x')) var3d = ds.createVariable('var3d', 'i4', ('some_times', 'y', 'x')) var3d.standard_name = 'x_wind' ds.close() os.close(fd) # Just in case - see https://www.logilab.org/blogentry/17873 fd, self.tmp_filename_no_time_var = tempfile.mkstemp(suffix='.nc') ds = Dataset(self.tmp_filename_no_time_var, 'w') y_sz = 30 x_sz = 20 # Set dimensions ds.createDimension('y', y_sz) ds.createDimension('x', x_sz) # Set variables # 1d "dimensional" variables i.e lats, times, etc. ys = ds.createVariable('y', 'i4', ('y')) ys.standard_name = 'projection_y_coordinate' ys[:] = np.linspace(0, 60, y_sz) xs = ds.createVariable('x', 'i4', ('x')) xs.standard_name = 'projection_x_coordinate' xs[:] = np.linspace(0, 20, x_sz) # Spatial variables 2d and 3d var2d = ds.createVariable('var2d', 'i4', ('y', 'x')) var2d.standard_name = 'x_wind' ds.close() os.close(fd) # Just in case - see https://www.logilab.org/blogentry/17873 def tearDown(self): os.unlink(self.tmp_filename) os.unlink(self.tmp_filename_xy) os.unlink(self.tmp_filename_no_time_var) @patch('nansat.mappers.mapper_netcdf_cf.Mapper.__init__') def test__timevarname(self, mock_init): mock_init.return_value = None mm = Mapper() mm.input_filename = self.tmp_filename_no_time_var timevar_name = mm._timevarname() self.assertEqual(timevar_name, '') mm = Mapper() mm.input_filename = self.tmp_filename_xy timevar_name = mm._timevarname() self.assertEqual(timevar_name, 'some_times') mm = Mapper() mm.input_filename = self.tmp_filename timevar_name = mm._timevarname() self.assertEqual(timevar_name, 'time') @patch('nansat.mappers.mapper_netcdf_cf.Mapper._time_reference') @patch('nansat.mappers.mapper_netcdf_cf.Mapper.__init__') def test_time_count_to_np_datetime64(self, mock_init, mock_units): mock_init.return_value = None tu = (datetime.datetime(1900, 1, 1, 0, 0), 'days since 1900-1-1 0:0:0 +0') mock_units.return_value = tu mm = Mapper() time_count = '43648.22734953704' # TEST DAYS tt = mm._time_count_to_np_datetime64(time_count) # Assert data type of tt is np.datetime64 self.assertEqual(type(tt), np.datetime64) tt = mm._time_count_to_np_datetime64(time_count, time_reference=tu) # Assert data type of tt is np.datetime64 self.assertEqual(type(tt), np.datetime64) @patch('nansat.mappers.mapper_netcdf_cf.Mapper._time_reference') @patch('nansat.mappers.mapper_netcdf_cf.Mapper.__init__') def test_time_count_to_np_datetime64(self, mock_init, mock_units): mock_init.return_value = None tu = (datetime.datetime(1900, 1, 1, 0, 0), 'days since 1900-1-1 0:0:0 +0') mock_units.return_value = tu mm = Mapper() time_count = '43648.22734953704' # TEST DAYS tt = mm._time_count_to_np_datetime64(time_count) # Assert data type of tt is np.datetime64 self.assertEqual(type(tt), np.datetime64) tt = mm._time_count_to_np_datetime64(time_count, time_reference=tu) # Assert data type of tt is np.datetime64 self.assertEqual(type(tt), np.datetime64) time_count = '43648' tt = 
mm._time_count_to_np_datetime64(time_count) # Assert data type of tt is np.datetime64 self.assertEqual(type(tt), np.datetime64) self.assertEqual(tt, np.datetime64('2019-07-04')) # TEST HOURS tu = (datetime.datetime(1900, 1, 1, 0, 0), 'hours since 1900-1-1 0:0:0 +0') tt = mm._time_count_to_np_datetime64(time_count, time_reference=tu) # Assert data type of tt is np.datetime64 self.assertEqual(type(tt), np.datetime64) self.assertEqual(tt, np.datetime64('1904-12-24T16:00:00.000000')) time_count = '43648.22734953704' tt = mm._time_count_to_np_datetime64(time_count, time_reference=tu) # Assert data type of tt is np.datetime64 self.assertEqual(type(tt), np.datetime64) self.assertEqual(tt, np.datetime64('1904-12-24T16:13:38.458333')) # TEST MINUTES tu = (datetime.datetime(1900, 1, 1, 0, 0), 'minutes since 1900-1-1 0:0:0 +0') tt = mm._time_count_to_np_datetime64(time_count, time_reference=tu) # Assert data type of tt is np.datetime64 self.assertEqual(type(tt), np.datetime64) self.assertEqual(tt, np.datetime64('1900-01-31T07:28:13.640972')) # TEST SECONDS tu = (datetime.datetime(1900, 1, 1, 0, 0), 'seconds since 1900-1-1 0:0:0 +0') tt = mm._time_count_to_np_datetime64(time_count, time_reference=tu) # Assert data type of tt is np.datetime64 self.assertEqual(type(tt), np.datetime64) self.assertEqual(tt, np.datetime64('1900-01-01T12:07:28.227350')) @patch('nansat.mappers.mapper_netcdf_cf.Mapper.__init__') def test_variable_with_a_dimension_that_is_not_itself_added_as_a_variable(self, mock_init): mock_init.return_value = None mm = Mapper() mm.input_filename = self.tmp_filename fn = 'NETCDF:"' + self.tmp_filename + '":rgb_var' bdict = mm._get_band_from_subfile(fn) self.assertEqual(bdict['src']['SourceBand'], 1) @patch('nansat.mappers.mapper_netcdf_cf.Mapper.__init__') def test_with_xy_dims(self, mock_init): mock_init.return_value = None mm = Mapper() mm.input_filename = self.tmp_filename_xy fn = 'NETCDF:"' + self.tmp_filename + '":var3d' bdict = mm._get_band_from_subfile(fn, bands=['x_wind']) self.assertEqual(bdict['src']['SourceBand'], 1) @patch('nansat.mappers.mapper_netcdf_cf.Mapper.__init__') def test_buggy_var(self, mock_init): """ The last band dimensions should be latitude and longitude - otherwise gdal will fail in reading the data correctly. This is to confirm that this understanding is correct.. 
The shape of buggy_var is ('time', 'latitude', 'longitude', 'pressure') """ mock_init.return_value = None mm = Mapper() mm.input_filename = self.tmp_filename fn = 'NETCDF:"' + self.tmp_filename + '":buggy_var' bdict = mm._get_band_from_subfile(fn, bands=['x_wind']) self.assertEqual(bdict['src']['SourceBand'], 1) self.assertEqual(bdict['dst']['NETCDF_DIM_latitude'], '0') self.assertEqual(bdict['dst']['time_iso_8601'], np.datetime64('2019-06-15T15:00:00.000000')) subds = gdal.Open(fn) self.assertEqual(subds.RasterXSize, 7) # size of pressure dimension self.assertEqual(subds.RasterYSize, 20) # size of longitude dimension @patch('nansat.mappers.mapper_netcdf_cf.Mapper.__init__') def test__get_band_from_subfile__var4d(self, mock_init): mock_init.return_value = None mm = Mapper() mm.input_filename = self.tmp_filename fn = 'NETCDF:"' + self.tmp_filename + '":var4d' bdict200 = mm._get_band_from_subfile(fn, netcdf_dim = { 'time': np.datetime64('2019-06-15T18:00'), # 2nd band 'pressure': 200}, bands=['x_wind']) bdict500 = mm._get_band_from_subfile(fn, netcdf_dim = { 'time': np.datetime64('2019-06-15T18:00'), # 2nd band 'pressure': 500}, bands=['x_wind']) self.assertEqual(bdict200['src']['SourceBand'], 8) self.assertEqual(bdict200['dst']['NETCDF_DIM_pressure'], '200') self.assertEqual(bdict200['dst']['time_iso_8601'], np.datetime64('2019-06-15T18:00:00.000000')) self.assertEqual(bdict500['src']['SourceBand'], 12) self.assertEqual(bdict500['dst']['NETCDF_DIM_pressure'], '500') self.assertEqual(bdict500['dst']['time_iso_8601'], np.datetime64('2019-06-15T18:00:00.000000')) @patch('nansat.mappers.mapper_netcdf_cf.Mapper.__init__') def test__get_band_from_subfile__var5d(self, mock_init): mock_init.return_value = None mm = Mapper() mm.input_filename = self.tmp_filename fn = 'NETCDF:"' + self.tmp_filename + '":var5d' # should give 1 band when time, pressure and height is in netcdf_dim bdict1 = mm._get_band_from_subfile(fn, netcdf_dim = { 'time': np.datetime64('2019-06-15T18:00'), 'pressure': 200, 'height': 20}, bands=['x_wind']) self.assertEqual(bdict1['dst']['NETCDF_DIM_height'], '20') self.assertEqual(bdict1['dst']['NETCDF_DIM_pressure'], '200') self.assertEqual(bdict1['dst']['time_iso_8601'], np.datetime64('2019-06-15T18:00:00.000000')) self.assertEqual(bdict1['src']['SourceBand'], 72) # should give first height_sz band when only time and pressure is in netcdf_dim bdict1 = mm._get_band_from_subfile(fn, netcdf_dim = { 'time': np.datetime64('2019-06-15T18:00'), 'pressure': 200}, bands=['x_wind']) self.assertEqual(bdict1['dst']['NETCDF_DIM_height'], '10') self.assertEqual(bdict1['dst']['NETCDF_DIM_pressure'], '200') self.assertEqual(bdict1['dst']['time_iso_8601'], np.datetime64('2019-06-15T18:00:00.000000')) self.assertEqual(bdict1['src']['SourceBand'], 71) # should give first height_sz and pressure bands when only time is in netcdf_dim bdict1 = mm._get_band_from_subfile(fn, netcdf_dim = { 'time': np.datetime64('2019-06-15T18:00'), }, bands=['x_wind']) self.assertEqual(bdict1['dst']['NETCDF_DIM_height'], '10') self.assertEqual(bdict1['dst']['NETCDF_DIM_pressure'], '200') self.assertEqual(bdict1['dst']['time_iso_8601'], np.datetime64('2019-06-15T18:00:00.000000')) self.assertEqual(bdict1['src']['SourceBand'], 71) bdict1 = mm._get_band_from_subfile(fn, netcdf_dim = { 'pressure': 300, # 3rd band }, bands=['x_wind']) self.assertEqual(bdict1['dst']['NETCDF_DIM_height'], '10') self.assertEqual(bdict1['dst']['NETCDF_DIM_pressure'], '300') self.assertEqual(bdict1['dst']['time_iso_8601'], 
np.datetime64('2019-06-15T15:00:00.000000')) self.assertEqual(bdict1['src']['SourceBand'], 21) bdict1 = mm._get_band_from_subfile(fn, netcdf_dim = { 'height': 30, # 3rd band }, bands=['x_wind']) self.assertEqual(bdict1['dst']['NETCDF_DIM_height'], '30') self.assertEqual(bdict1['dst']['NETCDF_DIM_pressure'], '200') self.assertEqual(bdict1['dst']['time_iso_8601'], np.datetime64('2019-06-15T15:00:00.000000')) self.assertEqual(bdict1['src']['SourceBand'], 3) bdict1 = mm._get_band_from_subfile(fn, netcdf_dim = { 'time': np.datetime64('2019-06-15T15:00'), }, bands=['x_wind']) self.assertEqual(bdict1['dst']['NETCDF_DIM_height'], '10') self.assertEqual(bdict1['dst']['NETCDF_DIM_pressure'], '200') self.assertEqual(bdict1['dst']['time_iso_8601'], np.datetime64('2019-06-15T15:00:00.000000')) self.assertEqual(bdict1['src']['SourceBand'], 1) # should give first band when netcdf_dim is empty bdict1 = mm._get_band_from_subfile(fn, bands=['x_wind']) self.assertEqual(bdict1['dst']['NETCDF_DIM_height'], '10') self.assertEqual(bdict1['dst']['NETCDF_DIM_pressure'], '200') self.assertEqual(bdict1['dst']['time_iso_8601'], np.datetime64('2019-06-15T15:00:00.000000')) self.assertEqual(bdict1['src']['SourceBand'], 1)
nansencenter/nansat
nansat/tests/mappers/test_mapper_netcdf_cf.py
Python
gpl-3.0
16960
[ "NetCDF" ]
3b8e7ad7878e8558fc151f12c41c61ffdf988542b69272cfd843d91ae5abeaa0
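The mapper tests above hinge on GDAL's NetCDF sub-dataset convention: a single variable inside a NetCDF file is opened as NETCDF:"<file>":<variable>, the two trailing dimensions become the raster, and any extra leading dimensions (time, pressure, height) become separate bands. The following is a small self-contained sketch of that convention; it uses plain osgeo.gdal rather than nansat.utils.gdal, assumes a GDAL build with the netCDF driver, and the file and variable names are made up.

import tempfile
import numpy as np
from netCDF4 import Dataset
from osgeo import gdal

path = tempfile.mktemp(suffix='.nc')
with Dataset(path, 'w') as ds:
    ds.createDimension('time', 3)
    ds.createDimension('y', 5)
    ds.createDimension('x', 4)
    var = ds.createVariable('var3d', 'f4', ('time', 'y', 'x'))
    var[:] = np.arange(3 * 5 * 4, dtype='f4').reshape(3, 5, 4)

# One GDAL band per time step; x/y become the raster dimensions.
sub = gdal.Open('NETCDF:"%s":var3d' % path)
print(sub.RasterCount, sub.RasterXSize, sub.RasterYSize)  # expected: 3 4 5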
# Lint as: python3 # Copyright 2021 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for lingvo Jax transformer layers.""" import itertools from absl import logging from absl.testing import absltest from absl.testing import parameterized import jax from jax import numpy as jnp from lingvo.core import batch_major_attention from lingvo.core import layers_with_attention from lingvo.jax import base_layer from lingvo.jax import py_utils from lingvo.jax import test_utils from lingvo.jax.layers import attentions from lingvo.jax.layers import transformers import numpy as np import tensorflow.compat.v2 as tf class TransformersTest(test_utils.TestCase): def setUp(self): super().setUp() np.random.seed(123456) tf.random.set_seed(123) @parameterized.parameters(*list(itertools.product([True, False], repeat=3))) def test_transformer_layer(self, mask_self_attention, packed_input, cross_attention): p = transformers.Transformer.Params().Set( name='jax_transformer_layer', input_dims=32, hidden_dims=128, num_heads=8, mask_self_attention=mask_self_attention, packed_input=packed_input, cross_attention=cross_attention) seq_len = np.random.randint(10, 32) batch_size = 10 transformer_layer = p.Instantiate() prng_key = jax.random.PRNGKey(seed=123) initial_vars = transformer_layer.instantiate_variables(prng_key) npy_inputs = np.random.normal( 1.0, 0.5, [batch_size, seq_len, p.input_dims]).astype('float32') inputs = jnp.asarray(npy_inputs) npy_paddings = np.random.randint(0, 1, [batch_size, seq_len]).astype('float32') paddings = jnp.asarray(npy_paddings) causal_mask = None segment_mask = None tf_segment_mask = None attention_mask = attentions.convert_paddings_to_mask(paddings) if mask_self_attention: causal_mask = attentions.causal_mask(inputs) attention_mask = jnp.minimum(attention_mask, causal_mask) if packed_input: segment_ids = np.random.random_integers(0, 2, [batch_size, seq_len]) segment_mask = attentions.segment_mask(segment_ids, dtype=np.float32) attention_mask = jnp.minimum(attention_mask, segment_mask) if mask_self_attention: tf_segment_mask = batch_major_attention.CausalSegmentMask( segment_ids, tf.float32) else: tf_segment_mask = batch_major_attention.SegmentMask( segment_ids, segment_ids) cross_inputs = None cross_attention_mask = None tf_cross_inputs = None tf_cross_paddings = None tf_cross_segment_mask = None if cross_attention: cross_seq_len = np.random.randint(10, 128) npy_cross_inputs = np.random.normal( 1.0, 0.5, [batch_size, cross_seq_len, p.input_dims]).astype('float32') cross_inputs = jnp.asarray(npy_cross_inputs) tf_cross_inputs = tf.constant(npy_cross_inputs, dtype=tf.float32) npy_cross_paddings = np.random.randint( 0, 1, [batch_size, cross_seq_len]).astype('float32') cross_paddings = jnp.asarray(npy_cross_paddings) cross_attention_mask = attentions.convert_paddings_to_mask(cross_paddings) tf_cross_paddings = tf.constant(npy_cross_paddings, dtype=tf.float32) if packed_input: 
source_segment_ids = np.random.random_integers( 0, 2, [batch_size, cross_seq_len]) cross_segment_mask = attentions.segment_mask( segment_ids, source_segment_ids, dtype=np.float32) cross_attention_mask = jnp.minimum(cross_attention_mask, cross_segment_mask) tf_cross_segment_mask = batch_major_attention.SegmentMask( segment_ids, source_segment_ids) outputs, _ = test_utils.apply( transformer_layer, initial_vars, transformer_layer.fprop, inputs, paddings, context_p=None, attention_mask=attention_mask, cross_inputs=cross_inputs, cross_attention_mask=cross_attention_mask) logging.info('initial_vars in transformer layer = %s', initial_vars) # Test whether tf Transformer layer returns same output # Modify initial_vars to use TF compatible params tf_initial_vars = test_utils.replace_jax_attention_vars_to_tf( initial_vars, cross_attention) tf_initial_vars = test_utils.to_tf_nmap(tf_initial_vars) logging.info('tf_initial_vars in transformer layer = %s', initial_vars) tf_p = batch_major_attention.TransformerLayer.Params().Set( name='tf_transformer_layer', input_dim=p.input_dims, num_heads=p.num_heads, mask_self_atten=mask_self_attention, packed_input=packed_input, has_aux_atten=cross_attention) tf_p.tr_fflayer_tpl.hidden_dim = p.hidden_dims tf_p.tr_fflayer_tpl.fflayer_tpl.batch_norm = False tf_p.tr_fflayer_tpl.fflayer_tpl.has_bias = True tf_transformer_layer = tf_p.Instantiate() tf_output, _ = tf_transformer_layer.FProp( tf_initial_vars, tf.constant(npy_inputs, dtype=tf.float32), paddings=test_utils.to_tf_nmap(npy_paddings), segment_mask=tf_segment_mask, aux_vec=tf_cross_inputs, aux_paddings=tf_cross_paddings, aux_segment_mask=test_utils.to_tf_nmap(tf_cross_segment_mask)) np_outputs = test_utils.to_np(outputs) tf_np_outputs = test_utils.to_np(tf_output) self.assertAllClose(tf_np_outputs, np_outputs, atol=1e-5) @parameterized.parameters(*list(itertools.product([True, False], repeat=4))) def test_transformer_layer_extendstep(self, packed_input, cross_attention, dconv_qkv, use_rotary_position_emb): p = transformers.Transformer.Params().Set( name='jax_transformer_layer', input_dims=8, hidden_dims=32, num_heads=4, mask_self_attention=True, packed_input=packed_input, cross_attention=cross_attention) p.tr_atten_tpl.dconv_qkv = dconv_qkv p.tr_atten_tpl.use_rotary_position_emb = use_rotary_position_emb if cross_attention: p.cross_atten_tpl = p.tr_atten_tpl.Copy() # Cross attention should not have depth-wise convolution. p.cross_atten_tpl.dconv_qkv = False # Cross attention should not have rotary position embedding. 
p.cross_atten_tpl.use_rotary_position_emb = False p.tr_atten_tpl.dconv_kernel_size = 2 seq_len = 4 batch_size = 4 transformer_layer = p.Instantiate() prng_key = jax.random.PRNGKey(seed=123) initial_vars = transformer_layer.instantiate_variables(prng_key) initial_states = transformer_layer.init_states(batch_size, seq_len) npy_inputs = np.random.normal( 1.0, 0.5, [batch_size, seq_len, p.input_dims]).astype('float32') inputs = jnp.asarray(npy_inputs) npy_paddings = np.random.randint(0, 1, [batch_size, seq_len]).astype('float32') # npy_paddings = np.zeros([batch_size, seq_len]) paddings = jnp.asarray(npy_paddings) attention_mask = attentions.convert_paddings_to_mask(paddings) segment_mask = None causal_mask = attentions.causal_mask(inputs) attention_mask = jnp.minimum(causal_mask, attention_mask) if packed_input: segment_ids = np.random.random_integers(0, 2, [batch_size, seq_len]) segment_mask = attentions.segment_mask(segment_ids, dtype=np.float32) attention_mask = jnp.minimum(attention_mask, segment_mask) cross_inputs = None cross_paddings = None cross_attention_mask = None if cross_attention: cross_seq_len = np.random.randint(10, 32) npy_cross_inputs = np.random.normal( 1.0, 0.5, [batch_size, cross_seq_len, p.input_dims]).astype('float32') cross_inputs = jnp.asarray(npy_cross_inputs) npy_cross_paddings = np.random.randint( 0, 1, [batch_size, cross_seq_len]).astype('float32') cross_paddings = jnp.asarray(npy_cross_paddings) cross_attention_mask = attentions.convert_paddings_to_mask(cross_paddings) if packed_input: source_segment_ids = np.random.random_integers( 0, 2, [batch_size, cross_seq_len]) cross_segment_mask = attentions.segment_mask( segment_ids, source_segment_ids, dtype=np.float32) cross_attention_mask = jnp.minimum(cross_attention_mask, cross_segment_mask) with base_layer.JaxContext.new_context( prng_key=prng_key, global_step=jnp.array(0, dtype=jnp.uint32)) as jax_context: jax_context.bind(transformer_layer, transformer_layer.vars_to_flax_vars(initial_vars)) fprop_outputs, _ = transformer_layer.fprop( inputs, paddings, attention_mask=attention_mask, cross_inputs=cross_inputs, cross_attention_mask=cross_attention_mask) decoder_outputs = jnp.zeros(shape=[seq_len, batch_size, p.input_dims]) atten_states = initial_states for t in range(seq_len): attention_mask_t = attention_mask[:, :, t, :] cross_attention_mask_t = cross_attention_mask if cross_attention: cross_attention_mask_t = cross_attention_mask[:, :, t, :] cross_attention_mask_t = np.expand_dims( cross_attention_mask_t, axis=2) atten_states, encoded = transformer_layer.extend_step( atten_states, inputs=inputs[:, t, :], time_step=t, attention_mask=attention_mask_t, cross_inputs=cross_inputs, cross_attention_mask=cross_attention_mask_t) decoder_outputs = decoder_outputs.at[t].set(encoded) decoder_out_transposed = jnp.transpose(decoder_outputs, [1, 0, 2]) logging.info('initial_vars in transformer layer = %s', initial_vars) np_fprop_outputs = test_utils.to_np(fprop_outputs) np_decoder_outputs = test_utils.to_np(decoder_out_transposed) self.assertAllClose(np_fprop_outputs, np_decoder_outputs, atol=1e-5) @parameterized.parameters(True, False) def test_transformer_layer_cross_attention_ln(self, packed_input): p = transformers.Transformer.Params().Set( name='jax_transformer_layer', input_dims=8, hidden_dims=32, num_heads=4, mask_self_attention=True, packed_input=packed_input, cross_attention=True) seq_len = 5 batch_size = 4 transformer_layer = p.Instantiate() prng_key = jax.random.PRNGKey(seed=123) initial_vars = 
transformer_layer.instantiate_variables(prng_key) # Change the self attention initial vars. initial_vars.layer_norm.scale = 0.5 initial_vars.layer_norm.bias = 5.0 # Change the cross attention initial vars. initial_vars.cross_layer_norm.scale = 15 initial_vars.cross_layer_norm.bias = 1.5 npy_inputs = np.random.normal( 1.0, 0.5, [batch_size, seq_len, p.input_dims]).astype('float32') inputs = jnp.asarray(npy_inputs) npy_paddings = np.random.randint(0, 1, [batch_size, seq_len]).astype('float32') paddings = jnp.asarray(npy_paddings) attention_mask = attentions.convert_paddings_to_mask(paddings) causal_mask = attentions.causal_mask(inputs) attention_mask = jnp.minimum(causal_mask, attention_mask) if packed_input: segment_ids = np.random.random_integers(0, 2, [batch_size, seq_len]) segment_mask = attentions.segment_mask(segment_ids, dtype=np.float32) attention_mask = jnp.minimum(attention_mask, segment_mask) with base_layer.JaxContext.new_context( prng_key=prng_key, global_step=jnp.array(0, dtype=jnp.uint32)) as jax_context: jax_context.bind(transformer_layer, transformer_layer.vars_to_flax_vars(initial_vars)) inputs_normalized = transformer_layer.layer_norm.fprop(inputs) # Compute self-attention, key/value vectors are the input itself atten_output, _ = transformer_layer.self_attention.fprop( inputs_normalized, inputs_normalized, inputs_normalized, atten_mask=attention_mask) # Residual dropout and connection. atten_output = transformer_layer.residual_dropout.fprop(atten_output) atten_output += inputs # Normalize atten outputs using cross attention. atten_output_normalized = transformer_layer.cross_layer_norm.fprop( atten_output) inputs_normalized = test_utils.to_np(inputs_normalized) atten_output_normalized = test_utils.to_np(atten_output_normalized) self.assertAllClose( initial_vars.layer_norm.bias, inputs_normalized.mean(), atol=1e-3) self.assertAllClose( (1.0 + initial_vars.layer_norm.scale)**2, np.var(inputs_normalized), atol=5e-3) self.assertAllClose( initial_vars.cross_layer_norm.bias, atten_output_normalized.mean(), atol=1e-3) self.assertAllClose( (1.0 + initial_vars.cross_layer_norm.scale)**2, np.var(atten_output_normalized), atol=5e-3) def test_transformer_layer_cross_attention_dconv_value_error(self): p = transformers.Transformer.Params().Set( name='jax_transformer_layer', input_dims=8, hidden_dims=32, num_heads=4, cross_attention=True, mask_self_attention=True) # Enable cross attention. p.cross_atten_tpl = p.tr_atten_tpl.Copy() # Enable depth-wise convolution. p.cross_atten_tpl.dconv_qkv = True with self.assertRaises(ValueError): p.Instantiate() def test_transformer_layer_cross_attention_pos_emb_value_error(self): p = transformers.Transformer.Params().Set( name='jax_transformer_layer', input_dims=8, hidden_dims=32, num_heads=4, cross_attention=True, mask_self_attention=True) # Enable cross attention. p.cross_atten_tpl = p.tr_atten_tpl.Copy() # Enable rotary position embedding. 
p.cross_atten_tpl.use_rotary_position_emb = True with self.assertRaises(ValueError): p.Instantiate() @parameterized.parameters(*list(itertools.product([True, False], repeat=3))) def test_transformer_moe_dense_layer(self, mask_self_attention, packed_input, cross_attention): # Comparing scan over blocks of layers and regular loop block_p = transformers.StackedTransformer.Params().Set( name='transformer_block', num_layers=2, model_dims=3, hidden_dims=6, num_heads=1, mask_self_attention=mask_self_attention, packed_input=packed_input, cross_attention=cross_attention, num_experts=4, num_groups=1, moe_layers=[0]) block_p_repeated = transformers.StackedTransformerRepeated.Params().Set( name='stacked_transformer_layer_repeated', block=block_p.Copy(), x_times=1) stack_p = transformers.StackedTransformer.Params().Set( name='transformer_stack', num_layers=2, # moe + dense model_dims=block_p.model_dims, hidden_dims=block_p.hidden_dims, num_heads=block_p.num_heads, mask_self_attention=block_p.mask_self_attention, packed_input=block_p.packed_input, cross_attention=block_p.cross_attention, num_experts=block_p.num_experts, num_groups=block_p.num_groups, moe_layers=[0]) moe_p = stack_p.moe_layer_tpl moe_p.expert_capacity_dim = 2 moe_p.expert_capacity_factor = 0 moe_p = block_p.moe_layer_tpl moe_p.expert_capacity_dim = 2 moe_p.expert_capacity_factor = 0 transformer_block = block_p_repeated.Instantiate() transformer_stack = stack_p.Instantiate() seq_len = 4 batch_size = 3 prng_key = jax.random.PRNGKey(seed=123) block_initial_vars = transformer_block.instantiate_variables(prng_key) stack_initial_vars = transformer_stack.instantiate_variables(prng_key) npy_inputs = np.random.normal( 1.0, 0.5, [batch_size, seq_len, block_p.model_dims]).astype('float32') inputs = jnp.asarray(npy_inputs) npy_paddings = np.random.randint(0, 1, [batch_size, seq_len]).astype('float32') paddings = jnp.asarray(npy_paddings) segment_mask = None if packed_input: segment_ids = np.random.random_integers(0, 2, [batch_size, seq_len]) segment_mask = attentions.segment_mask(segment_ids, dtype=np.float32) cross_inputs = None cross_paddings = None cross_segment_mask = None if cross_attention: cross_seq_len = np.random.randint(10, 64) npy_cross_inputs = np.random.normal( 1.0, 0.5, [batch_size, cross_seq_len, block_p.model_dims]).astype('float32') cross_inputs = jnp.asarray(npy_cross_inputs) npy_cross_paddings = np.random.randint( 0, 1, [batch_size, cross_seq_len]).astype('float32') cross_paddings = jnp.asarray(npy_cross_paddings) if packed_input: source_segment_ids = np.random.random_integers( 0, 2, [batch_size, cross_seq_len]) cross_segment_mask = attentions.segment_mask( segment_ids, source_segment_ids, dtype=np.float32) block_outputs = test_utils.apply( transformer_block, block_initial_vars, transformer_block.fprop, inputs, paddings, segment_mask=segment_mask, cross_inputs=cross_inputs, cross_paddings=cross_paddings, cross_segment_mask=cross_segment_mask) stack_outputs = test_utils.apply( transformer_stack, stack_initial_vars, transformer_stack.fprop, inputs, paddings, segment_mask=segment_mask, cross_inputs=cross_inputs, cross_paddings=cross_paddings, cross_segment_mask=cross_segment_mask) _ = test_utils.to_np(block_outputs) _ = test_utils.to_np(stack_outputs) @parameterized.parameters(*list(itertools.product([True, False], repeat=3))) def test_stacked_transformer_layer(self, mask_self_attention, packed_input, cross_attention): p = transformers.StackedTransformer.Params().Set( name='jax_stacked_transformer_layer', model_dims=16, 
hidden_dims=64, num_heads=8, mask_self_attention=mask_self_attention, num_layers=4, packed_input=packed_input, cross_attention=cross_attention) seq_len = np.random.randint(10, 32) batch_size = 10 stacked_transformer_layer = p.Instantiate() prng_key = jax.random.PRNGKey(seed=123) initial_vars = stacked_transformer_layer.instantiate_variables(prng_key) # test conversion between vars and flax vars. pax_vars = stacked_transformer_layer.vars flax_vars = stacked_transformer_layer.flax_vars tf.nest.assert_same_structure( pax_vars, stacked_transformer_layer.flax_vars_to_vars(flax_vars)) tf.nest.assert_same_structure( flax_vars, stacked_transformer_layer.vars_to_flax_vars(pax_vars)) npy_inputs = np.random.normal( 1.0, 0.5, [batch_size, seq_len, p.model_dims]).astype('float32') inputs = jnp.asarray(npy_inputs) npy_paddings = np.random.randint(0, 1, [batch_size, seq_len]).astype('float32') paddings = jnp.asarray(npy_paddings) segment_mask = None tf_segment_mask = None if packed_input: segment_ids = np.random.random_integers(0, 2, [batch_size, seq_len]) segment_mask = attentions.segment_mask(segment_ids, dtype=np.float32) if mask_self_attention: tf_segment_mask = batch_major_attention.CausalSegmentMask( segment_ids, tf.float32) else: tf_segment_mask = batch_major_attention.SegmentMask( segment_ids, segment_ids) cross_inputs = None cross_paddings = None cross_segment_mask = None tf_cross_inputs = None tf_cross_paddings = None tf_cross_segment_mask = None if cross_attention: cross_seq_len = np.random.randint(10, 64) npy_cross_inputs = np.random.normal( 1.0, 0.5, [batch_size, cross_seq_len, p.model_dims]).astype('float32') cross_inputs = jnp.asarray(npy_cross_inputs) tf_cross_inputs = tf.constant(npy_cross_inputs, dtype=tf.float32) npy_cross_paddings = np.random.randint( 0, 1, [batch_size, cross_seq_len]).astype('float32') cross_paddings = jnp.asarray(npy_cross_paddings) tf_cross_paddings = tf.constant(npy_cross_paddings, dtype=tf.float32) if packed_input: source_segment_ids = np.random.random_integers( 0, 2, [batch_size, cross_seq_len]) cross_segment_mask = attentions.segment_mask( segment_ids, source_segment_ids, dtype=np.float32) tf_cross_segment_mask = batch_major_attention.SegmentMask( segment_ids, source_segment_ids) outputs = test_utils.apply( stacked_transformer_layer, initial_vars, stacked_transformer_layer.fprop, inputs, paddings, context_p=None, segment_mask=segment_mask, cross_inputs=cross_inputs, cross_paddings=cross_paddings, cross_segment_mask=cross_segment_mask) logging.info('initial_vars in transformer layer = %s', initial_vars) # Test whether tf Transformer layer returns same output # Modify initial_vars to use TF compatible params tf_initial_vars = py_utils.NestedMap() tf_initial_vars.x_layers = [] for jax_initial_vars in initial_vars.x_layers: tf_layer_vars = test_utils.replace_jax_attention_vars_to_tf( jax_initial_vars, cross_attention) tf_initial_vars.x_layers.append(tf_layer_vars) tf_initial_vars = test_utils.to_tf_nmap(tf_initial_vars) logging.info('tf_initial_vars in transformer layer = %s', initial_vars) tf_p = batch_major_attention.StackedTransformerLayers.Params().Set( name='tf_transformer_layer', mdl_dim=p.model_dims, hidden_dim=p.hidden_dims, num_atten_heads=p.num_heads, mask_self_atten=mask_self_attention, num_layers=p.num_layers, packed_input=packed_input, has_aux_atten=cross_attention) tf_p.transformer_layer_params_tpl.tr_fflayer_tpl.fflayer_tpl.batch_norm = ( False) tf_p.transformer_layer_params_tpl.tr_fflayer_tpl.fflayer_tpl.has_bias = True tf_stacked_transformer_layer 
= tf_p.Instantiate() tf_output, _ = tf_stacked_transformer_layer.FProp( tf_initial_vars, test_utils.to_tf_nmap(npy_inputs), paddings=test_utils.to_tf_nmap(npy_paddings), segment_mask=test_utils.to_tf_nmap(tf_segment_mask), aux_vec=test_utils.to_tf_nmap(tf_cross_inputs), aux_paddings=test_utils.to_tf_nmap(tf_cross_paddings), aux_segment_mask=test_utils.to_tf_nmap(tf_cross_segment_mask)) np_outputs = test_utils.to_np(outputs) tf_np_outputs = test_utils.to_np(tf_output) self.assertAllClose(tf_np_outputs, np_outputs, atol=1e-5) @parameterized.parameters(*list(itertools.product([True, False], repeat=3))) def test_repeated_stacked_xformer_layer(self, mask_self_attention, packed_input, cross_attention): model_dims = 16 p1 = transformers.StackedTransformer.Params().Set( name='jax_stacked_transformer_layer', model_dims=model_dims, hidden_dims=64, num_heads=8, mask_self_attention=mask_self_attention, num_layers=4, packed_input=packed_input, cross_attention=cross_attention) p1_one_layer = p1.Copy() p1_one_layer.num_layers = 1 p2 = transformers.StackedTransformerRepeated.Params().Set( name='jax_stacked_transformer_layer_repeated', block=p1_one_layer, x_times=p1.num_layers) seq_len = np.random.randint(10, 32) batch_size = 10 stacked_transformer_layer = p1.Instantiate() repeated_transformer_layer = p2.Instantiate() prng_key = jax.random.PRNGKey(seed=123) initial_vars = stacked_transformer_layer.instantiate_variables(prng_key) repeated_transformer_layer.instantiate_variable_configs() def _stack_vars(*args): args = [x[jnp.newaxis, :] for x in args] return jnp.vstack(args) stacked_vars = tf.nest.map_structure(_stack_vars, *initial_vars.x_layers) repeated_vars = py_utils.NestedMap( repeat=py_utils.NestedMap( sub=py_utils.NestedMap(x_layers=[stacked_vars]))) tf.nest.assert_same_structure( repeated_vars, repeated_transformer_layer.instantiate_variables(prng_key)) npy_inputs = np.random.normal( 1.0, 0.5, [batch_size, seq_len, model_dims]).astype('float32') inputs = jnp.asarray(npy_inputs) npy_paddings = np.random.randint(0, 1, [batch_size, seq_len]).astype('float32') paddings = jnp.asarray(npy_paddings) segment_mask = None if packed_input: segment_ids = np.random.random_integers(0, 2, [batch_size, seq_len]) segment_mask = attentions.segment_mask(segment_ids, dtype=np.float32) cross_inputs = None cross_paddings = None cross_segment_mask = None if cross_attention: cross_seq_len = np.random.randint(10, 64) npy_cross_inputs = np.random.normal( 1.0, 0.5, [batch_size, cross_seq_len, model_dims]).astype('float32') cross_inputs = jnp.asarray(npy_cross_inputs) npy_cross_paddings = np.random.randint( 0, 1, [batch_size, cross_seq_len]).astype('float32') cross_paddings = jnp.asarray(npy_cross_paddings) if packed_input: source_segment_ids = np.random.random_integers( 0, 2, [batch_size, cross_seq_len]) cross_segment_mask = attentions.segment_mask( segment_ids, source_segment_ids, dtype=np.float32) outputs = test_utils.apply( stacked_transformer_layer, initial_vars, stacked_transformer_layer.fprop, inputs, paddings, context_p=None, segment_mask=segment_mask, cross_inputs=cross_inputs, cross_paddings=cross_paddings, cross_segment_mask=cross_segment_mask) outputs_repeated = test_utils.apply( repeated_transformer_layer, repeated_vars, repeated_transformer_layer.fprop, inputs, paddings, context_p=None, segment_mask=segment_mask, cross_inputs=cross_inputs, cross_paddings=cross_paddings, cross_segment_mask=cross_segment_mask) self.assertAllClose(outputs, outputs_repeated, atol=1e-5) 
@parameterized.parameters(*list(itertools.product([True, False], repeat=5))) def test_stacked_transformer_layer_extendstep(self, packed_input, cross_attention, combine_qkv, dconv_qkv, use_rotary_position_emb): if cross_attention and combine_qkv: self.skipTest('combine_qkv optimization only works for self-attention.') layer_params = transformers.StackedTransformer.Params() num_layers = 2 model_dims = 8 p = layer_params.Set( name='jax_transformer_layer', model_dims=model_dims, hidden_dims=32, num_heads=2, mask_self_attention=True, packed_input=packed_input, cross_attention=cross_attention, num_layers=num_layers) p.transformer_layer_params_tpl.tr_atten_tpl.combine_qkv = combine_qkv p.transformer_layer_params_tpl.tr_atten_tpl.dconv_qkv = dconv_qkv p.transformer_layer_params_tpl.tr_atten_tpl.use_rotary_position_emb = ( use_rotary_position_emb) if cross_attention: p.transformer_layer_params_tpl.cross_atten_tpl = ( p.transformer_layer_params_tpl.tr_atten_tpl.Copy()) # Cross attention should not have depth-wise convolution. p.transformer_layer_params_tpl.cross_atten_tpl.dconv_qkv = False # Cross attention should not have rotary position embedding. p.transformer_layer_params_tpl.cross_atten_tpl.use_rotary_position_emb = ( False) p_copy = p.Copy() p_copy.num_layers = 1 p = transformers.StackedTransformerRepeated.Params() p.name = 'jax_transformer_repeated_layer' p.block = p_copy p.x_times = num_layers seq_len = 4 batch_size = 4 stacked_transformer_layer = p.Instantiate() prng_key = jax.random.PRNGKey(seed=123) initial_vars = stacked_transformer_layer.instantiate_variables(prng_key) npy_inputs = np.random.normal( 1.0, 0.5, [batch_size, seq_len, model_dims]).astype('float32') inputs = jnp.asarray(npy_inputs) npy_paddings = np.random.randint(0, 1, [batch_size, seq_len]).astype('float32') paddings = jnp.asarray(npy_paddings) attention_mask = attentions.convert_paddings_to_mask(paddings) segment_mask = None if packed_input: segment_ids = np.random.random_integers(0, 2, [batch_size, seq_len]) segment_mask = attentions.segment_mask(segment_ids, dtype=np.float32) cross_inputs = None cross_paddings = None cross_segment_mask = None if cross_attention: cross_seq_len = np.random.randint(10, 32) npy_cross_inputs = np.random.normal( 1.0, 0.5, [batch_size, cross_seq_len, model_dims]).astype('float32') cross_inputs = jnp.asarray(npy_cross_inputs) npy_cross_paddings = np.random.randint( 0, 1, [batch_size, cross_seq_len]).astype('float32') cross_paddings = jnp.asarray(npy_cross_paddings) if packed_input: source_segment_ids = np.random.random_integers( 0, 2, [batch_size, cross_seq_len]) cross_segment_mask = attentions.segment_mask( segment_ids, source_segment_ids, dtype=np.float32) prng_key = jax.random.PRNGKey(seed=123) global_step = jnp.array(0, dtype=jnp.uint64) with base_layer.JaxContext.new_context( prng_key=prng_key, global_step=global_step) as jax_context: jax_context.bind( stacked_transformer_layer, stacked_transformer_layer.vars_to_flax_vars(initial_vars)) fprop_outputs = stacked_transformer_layer.fprop( inputs, paddings, segment_mask=segment_mask, cross_inputs=cross_inputs, cross_paddings=cross_paddings, cross_segment_mask=cross_segment_mask) decoder_outputs = jnp.zeros(shape=[seq_len, batch_size, model_dims]) initial_states = stacked_transformer_layer.init_states( batch_size, seq_len) atten_states = initial_states for t in range(seq_len): segment_mask_t = attention_mask[:, :, t, :] cross_segment_mask_t = cross_segment_mask if segment_mask is not None: segment_mask_t = jnp.minimum(segment_mask_t, 
segment_mask[:, :, t, :]) if cross_segment_mask is not None: cross_segment_mask_t = cross_segment_mask[:, :, t, :] atten_states, encoded = stacked_transformer_layer.extend_step( atten_states, inputs=inputs[:, t, :], time_step=t, segment_mask=segment_mask_t, cross_inputs=cross_inputs, cross_paddings=cross_paddings, cross_segment_mask=cross_segment_mask_t) decoder_outputs = decoder_outputs.at[t].set(encoded) decoder_out_transposed = jnp.transpose(decoder_outputs, [1, 0, 2]) # TODO(lepikhin): remove noisy test logging # logging.info('initial_vars in transformer layer = %s', initial_vars) np_fprop_outputs = test_utils.to_np(fprop_outputs) np_decoder_outputs = test_utils.to_np(decoder_out_transposed) self.assertAllClose(np_fprop_outputs, np_decoder_outputs, atol=1e-5) @parameterized.parameters('RELU', 'SILU', 'GATED_SILU') def test_transformer_feedforward(self, activation_function): p = transformers.TransformerFeedForward.Params().Set( name='ffwd', input_dims=8, hidden_dims=32, activation=activation_function) batch_size = 8 seq_len = 512 ffwd = p.Instantiate() prng_key = jax.random.PRNGKey(seed=123) initial_vars = ffwd.instantiate_variables(prng_key) npy_inputs = np.random.normal( 1.0, 0.5, [batch_size, seq_len, p.input_dims]).astype('float32') inputs = jnp.asarray(npy_inputs) npy_paddings = np.zeros([batch_size, seq_len], dtype=np.float32) input_paddings = jnp.asarray(npy_paddings) with base_layer.JaxContext.new_context( prng_key=jax.random.PRNGKey(seed=1234), global_step=jnp.array(0, dtype=jnp.uint32)) as jax_context: jax_context.bind(ffwd, ffwd.vars_to_flax_vars(initial_vars)) outputs = ffwd.fprop(inputs, input_paddings) logging.info('outputs: %s', outputs) if activation_function.startswith('GATED_'): # Default lingvo layers_with_attention.TransformerFeedForwardLayer does # not support gating. return # Test whether Tensorflow TransformerFeedForwardLayer returns the same # output. Modify `initial_vars` to use TF compatible params. 
tf_initial_vars = test_utils.replace_jax_transformer_ffwd_vars_to_tf( initial_vars) tf_initial_vars = test_utils.to_tf_nmap(tf_initial_vars) logging.info('tf_initial_vars in transformer feedforward layer = %s', initial_vars) tf_p = layers_with_attention.TransformerFeedForwardLayer.Params().Set( name='tf_ffwd', input_dim=p.input_dims, hidden_dim=p.hidden_dims, activation=p.activation) tf_ffwd = tf_p.Instantiate() tf_output = tf_ffwd.FProp( tf_initial_vars, tf.constant(npy_inputs, dtype=tf.float32), paddings=test_utils.to_tf_nmap(npy_paddings)) np_outputs = test_utils.to_np(outputs) tf_np_outputs = test_utils.to_np(tf_output) self.assertAllClose(tf_np_outputs, np_outputs, atol=1e-5) @parameterized.parameters(['pre', 'primer_hybrid']) def test_transformer_layer_norm_policies(self, norm_policy): p = transformers.Transformer.Params().Set( name='jax_transformer_layer', input_dims=32, hidden_dims=128, num_heads=8, mask_self_attention=True, packed_input=True, cross_attention=False, norm_policy=norm_policy) seq_len = np.random.randint(10, 32) batch_size = 10 transformer_layer = p.Instantiate() prng_key = jax.random.PRNGKey(seed=123) initial_vars = transformer_layer.instantiate_variables(prng_key) npy_inputs = np.random.normal( 1.0, 0.5, [batch_size, seq_len, p.input_dims]).astype('float32') inputs = jnp.asarray(npy_inputs) npy_paddings = np.random.randint(0, 1, [batch_size, seq_len]).astype('float32') paddings = jnp.asarray(npy_paddings) attention_mask = attentions.convert_paddings_to_mask(paddings) causal_mask = attentions.causal_mask(inputs) attention_mask = jnp.minimum(attention_mask, causal_mask) segment_ids = np.random.random_integers(0, 2, [batch_size, seq_len]) segment_mask = attentions.segment_mask(segment_ids, dtype=np.float32) attention_mask = jnp.minimum(attention_mask, segment_mask) with base_layer.JaxContext.new_context( prng_key=prng_key, global_step=jnp.array(0, dtype=jnp.uint32)) as jax_context: jax_context.bind(transformer_layer, transformer_layer.vars_to_flax_vars(initial_vars)) outputs, _ = transformer_layer.fprop( inputs, paddings, attention_mask=attention_mask) logging.info('initial_vars in transformer layer = %s', initial_vars) np_outputs = test_utils.to_np(outputs) # Plumbing test. 
self.assertAllClose(np_outputs, np_outputs, atol=1e-5) @parameterized.parameters([True, False]) def test_transformer_relative_bias(self, use_relative_bias): p = transformers.Transformer.Params().Set( name='jax_transformer_layer', input_dims=32, hidden_dims=128, num_heads=8, mask_self_attention=True, packed_input=True, cross_attention=False) seq_len = np.random.randint(10, 32) batch_size = 10 if use_relative_bias: p.tr_atten_tpl.relative_bias_tpl = attentions.RelativeBias.Params().Set( relative_attention_num_buckets=2, relative_attention_max_distance=8) transformer_layer = p.Instantiate() prng_key = jax.random.PRNGKey(seed=123) initial_vars = transformer_layer.instantiate_variables(prng_key) npy_inputs = np.random.normal( 1.0, 0.5, [batch_size, seq_len, p.input_dims]).astype('float32') inputs = jnp.asarray(npy_inputs) npy_paddings = np.random.randint(0, 1, [batch_size, seq_len]).astype('float32') paddings = jnp.asarray(npy_paddings) attention_mask = attentions.convert_paddings_to_mask(paddings) causal_mask = attentions.causal_mask(inputs) attention_mask = jnp.minimum(attention_mask, causal_mask) segment_ids = np.random.random_integers(0, 2, [batch_size, seq_len]) segment_mask = attentions.segment_mask(segment_ids, dtype=np.float32) attention_mask = jnp.minimum(attention_mask, segment_mask) if use_relative_bias: segment_pos = np.random.randint(0, seq_len, [batch_size, seq_len]).astype('int32') segment_pos = jnp.asarray(segment_pos) else: segment_pos = None with base_layer.JaxContext.new_context( prng_key=prng_key, global_step=jnp.array(0, dtype=jnp.uint32)) as jax_context: jax_context.bind(transformer_layer, transformer_layer.vars_to_flax_vars(initial_vars)) outputs, _ = transformer_layer.fprop( inputs, paddings, attention_mask=attention_mask, segment_pos=segment_pos) logging.info('initial_vars in transformer layer = %s', initial_vars) np_outputs = test_utils.to_np(outputs) logging.info('np_outputs: %s', np_outputs) if use_relative_bias: self.assertAlmostEqual(np_outputs[0, 0, 1], 0.79015386, places=5) self.assertAlmostEqual(np_outputs[0, 1, 0], 0.48336178, places=5) # Plumbing test. self.assertAllClose(np_outputs, np_outputs, atol=1e-5) if __name__ == '__main__': absltest.main()
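The incremental-decoding check in the test file above accumulates per-timestep `extend_step` outputs in time-major order and then transposes back to batch-major before comparing against the full `fprop` result. A minimal sketch of that accumulate-and-transpose pattern in plain JAX follows; the identity `step_fn` is a stand-in assumption and no lingvo layer APIs are used.

import jax.numpy as jnp
import numpy as np

def step_fn(x_t):
    # Stand-in for stacked_transformer_layer.extend_step: identity per time step.
    return x_t

batch, seq_len, dims = 2, 4, 3
inputs = jnp.asarray(np.random.normal(size=(batch, seq_len, dims)).astype('float32'))

# Reference "fprop": process the whole sequence at once (identity here).
fprop_outputs = inputs

# Step-by-step decode: accumulate outputs time-major, shape [seq_len, batch, dims].
decoder_outputs = jnp.zeros((seq_len, batch, dims), dtype=jnp.float32)
for t in range(seq_len):
    encoded = step_fn(inputs[:, t, :])
    decoder_outputs = decoder_outputs.at[t].set(encoded)

# Transpose back to batch-major before the comparison, as the test does.
decoder_out_transposed = jnp.transpose(decoder_outputs, [1, 0, 2])
np.testing.assert_allclose(np.asarray(fprop_outputs),
                           np.asarray(decoder_out_transposed), atol=1e-5)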
tensorflow/lingvo
lingvo/jax/layers/transformers_test.py
Python
apache-2.0
39,114
[ "MOE" ]
550c1d8036667922073ef0951e62773eaa9cab358a7663b877c02b93d3c6fa80
# coding: utf-8 # The MIT License (MIT) # Copyright (c) 2016 Sentry # Copyright (c) 2016 Łukasz Langa # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. from __future__ import absolute_import import ast import pycodestyle from collections import namedtuple from functools import partial class SentryVisitor(ast.NodeVisitor): NODE_WINDOW_SIZE = 4 def __init__(self, filename, lines): self.errors = [] self.filename = filename self.lines = lines self.has_absolute_import = False self.node_stack = [] self.node_window = [] def finish(self): if not self.has_absolute_import: self.errors.append( B003(1, 1), ) def visit(self, node): self.node_stack.append(node) self.node_window.append(node) self.node_window = self.node_window[-self.NODE_WINDOW_SIZE:] super(SentryVisitor, self).visit(node) self.node_stack.pop() def visit_ExceptHandler(self, node): if node.type is None: self.errors.append( B001(node.lineno, node.col_offset) ) self.generic_visit(node) def visit_ImportFrom(self, node): if node.module in B307.names: self.errors.append( B307(node.lineno, node.col_offset) ) if node.module == '__future__': for nameproxy in node.names: if nameproxy.name == 'absolute_import': self.has_absolute_import = True break def visit_Import(self, node): for alias in node.names: if alias.name.split('.', 1)[0] in B307.names: self.errors.append( B307(node.lineno, node.col_offset) ) def visit_Call(self, node): if isinstance(node.func, ast.Attribute): for bug in (B301, B302, B305): if node.func.attr in bug.methods: call_path = '.'.join(self.compose_call_path(node.func.value)) if call_path not in bug.valid_paths: self.errors.append( bug(node.lineno, node.col_offset) ) break for bug in (B312,): if node.func.attr in bug.methods: call_path = '.'.join(self.compose_call_path(node.func.value)) if call_path in bug.invalid_paths: self.errors.append( bug(node.lineno, node.col_offset) ) break self.generic_visit(node) def visit_Attribute(self, node): call_path = list(self.compose_call_path(node)) if '.'.join(call_path) == 'sys.maxint': self.errors.append( B304(node.lineno, node.col_offset) ) elif len(call_path) == 2 and call_path[1] == 'message': name = call_path[0] for elem in reversed(self.node_stack[:-1]): if isinstance(elem, ast.ExceptHandler) and elem.name == name: self.errors.append( B306(node.lineno, node.col_offset) ) break if node.attr in B101.methods: self.errors.append( B101( message="B101: Avoid using the {} mock call as it is " "confusing and prone to causing invalid test " "behavior.".format(node.attr), lineno=node.lineno, col=node.col_offset, 
), ) def visit_Assign(self, node): # TODO(dcramer): pretty sure these aren't working correctly on Python2 if isinstance(self.node_stack[-2], ast.ClassDef): # note: by hasattr belowe we're ignoring starred arguments, slices # and tuples for simplicity. assign_targets = {t.id for t in node.targets if hasattr(t, 'id')} if '__metaclass__' in assign_targets: self.errors.append( B303(node.lineno, node.col_offset) ) if '__unicode__' in assign_targets: self.errors.append( B313(node.lineno, node.col_offset) ) self.generic_visit(node) def visit_Name(self, node): for bug in (B308, B309, B310, B311): if node.id in bug.names: self.errors.append( bug( lineno=node.lineno, col=node.col_offset, ), ) def compose_call_path(self, node): if isinstance(node, ast.Attribute): for item in self.compose_call_path(node.value): yield item yield node.attr elif isinstance(node, ast.Name): yield node.id class SentryCheck(object): name = 'sentry-checker' def __init__(self, tree, filename=None, lines=None): self.tree = tree self.filename = filename self.lines = lines self.visitor = SentryVisitor def run(self): if not self.tree or not self.lines: self.load_file() visitor = self.visitor( filename=self.filename, lines=self.lines, ) visitor.visit(self.tree) visitor.finish() for e in visitor.errors: try: if pycodestyle.noqa(self.lines[e.lineno - 1]): continue except IndexError: pass yield e def load_file(self): """ Loads the file in a way that auto-detects source encoding and deals with broken terminal encodings for stdin. Stolen from flake8_import_order because it's good. """ if self.filename in ("stdin", "-", None): self.filename = "stdin" self.lines = pycodestyle.stdin_get_value().splitlines(True) else: self.lines = pycodestyle.readlines(self.filename) if not self.tree: self.tree = ast.parse("".join(self.lines)) # def run(self): # visitor = Py2to3Visitor() # visitor.visit(self.tree) # for code, lineno, name in visitor.errors: # yield lineno, 0, self.codes[code], type(self) error = namedtuple('error', 'lineno col message type') B001 = partial( error, message="B001: Do not use bare `except:`, it also catches unexpected " "events like memory errors, interrupts, system exit, and so on. " "Prefer `except Exception:`. If you're sure what you're doing, " "be explicit and write `except BaseException:`.", type=SentryCheck, ) B002 = partial( error, message="B002: Python does not support the unary prefix increment. Writing " "++n is equivalent to +(+(n)), which equals n. You meant n += 1.", type=SentryCheck, ) B003 = partial( error, message="B003: Missing `from __future__ import absolute_import`", type=SentryCheck, ) B101 = partial( error, type=SentryCheck) B101.methods = {'assert_calls', 'assert_not_called', 'assert_called', 'assert_called_once', 'not_called', 'called_once', 'called_once_with'} # Those could be false positives but it's more dangerous to let them slip # through if they're not. B301 = partial( error, message="B301: Python 3 does not include .iter* methods on dictionaries. " "Use `six.iter*` or `future.utils.iter*` instead.", type=SentryCheck, ) B301.methods = {'iterkeys', 'itervalues', 'iteritems', 'iterlists'} B301.valid_paths = {'six', 'future.utils', 'builtins'} B302 = partial( error, message="B302: Python 3 does not include .view* methods on dictionaries. " "Remove the ``view`` prefix from the method name. 
Use `six.view*` " "or `future.utils.view*` instead.", type=SentryCheck, ) B302.methods = {'viewkeys', 'viewvalues', 'viewitems', 'viewlists'} B302.valid_paths = {'six', 'future.utils', 'builtins'} B303 = partial( error, message="B303: __metaclass__ does not exist in Python 3. Use " "`@six.add_metaclass()` instead.", type=SentryCheck, ) B304 = partial( error, message="B304: sys.maxint does not exist in Python 3. Use `sys.maxsize`.", type=SentryCheck, ) B305 = partial( error, message="B305: .next() does not exist in Python 3. Use ``six.next()`` " "instead.", type=SentryCheck, ) B305.methods = {'next'} B305.valid_paths = {'six', 'future.utils', 'builtins'} B306 = partial( error, message="B306: ``BaseException.message`` has been deprecated as of Python " "2.6 and is removed in Python 3. Use ``str(e)`` to access the " "user-readable message. Use ``e.args`` to access arguments passed " "to the exception.", type=SentryCheck, ) B307 = partial( error, message="B307: Python 3 has combined urllib, urllib2, and urlparse into " "a single library. For Python 2 compatibility, utilize the " "six.moves.urllib module.", type=SentryCheck) B307.names = {'urllib', 'urllib2', 'urlparse'} B308 = partial( error, message="B308: The usage of ``str`` differs between Python 2 and 3. Use " "``six.binary_type`` instead.", type=SentryCheck, ) B308.names = {'str'} B309 = partial( error, message="B309: ``unicode`` does not exist in Python 3. Use " "``six.text_type`` instead.", type=SentryCheck, ) B309.names = {'unicode'} B310 = partial( error, message="B310: ``basestring`` does not exist in Python 3. Use " "``six.string_types`` instead.", type=SentryCheck, ) B310.names = {'basestring'} B311 = partial( error, message="B311: ``long`` should not be used. Use int instead, and allow " "Python to deal with handling large integers.", type=SentryCheck, ) B311.names = {'long'} B312 = partial( error, message="B312: ``cgi.escape`` and ``html.escape`` should not be used. Use " "sentry.utils.html.escape instead.", type=SentryCheck, ) B312.methods = {'escape'} B312.invalid_paths = {'cgi', 'html'} B313 = partial( error, message="B313: ``__unicode__`` should not be defined on classes. Define " "just ``__str__`` returning a unicode text string, and use the " "sentry.utils.compat.implements_to_string class decorator.", type=SentryCheck, )
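The check codes above are built as `partial(error, ...)` over the `error` namedtuple, so a call like `B001(lineno, col)` produces a ready-made error record, and `SentryVisitor` collects them while walking the AST. A small, hypothetical usage sketch (the snippet and filename are made up; it assumes the classes defined above are in scope):

import ast

source = (
    "import urllib\n"   # flags B307: urllib was reorganized in Python 3
    "try:\n"
    "    pass\n"
    "except:\n"         # flags B001: bare except
    "    pass\n"
)

visitor = SentryVisitor(filename='example.py', lines=source.splitlines(True))
visitor.visit(ast.parse(source))
visitor.finish()  # also appends B003: no `from __future__ import absolute_import`

for err in visitor.errors:
    print(err.lineno, err.col, err.message)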
alexm92/sentry
src/sentry/lint/sentry_check.py
Python
bsd-3-clause
11,683
[ "VisIt" ]
2b6decb5ed1857dbe681bcad9748eb64362ddca162b455799e2da4560060db6d
""" Module for I/O """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import json import os from builtins import super import numpy as np from astropy.table import Table from scipy.interpolate import CubicSpline from . import __path__ DATA_PATH = os.path.join(__path__[0], 'data') def numpify_dict(d): """ Recursively make lists in a dictionary into numpy array """ def numpify(d): for k, v in d.items(): if isinstance(v, list): d[k] = np.array(v) elif isinstance(v, dict): numpify(v) new_dict = d.copy() numpify(new_dict) return new_dict class Params(dict): """ Input parameters """ def __init__(self, ifile='ne2001_params.json', path=None, **new_params): """ """ if path is None: path = DATA_PATH self.path = path self.ifile = ifile try: params = numpify_dict(parse_json(os.path.join(self.path, self.ifile))) params['spiral_arms']['adict'] = init_spiral_arms() except IOError: params = {} params.update(new_params) super().__init__(params) def parse_json(json_file): "Parse json file" with open(json_file, 'rt') as json_data: data = json.load(json_data) return data def read_galparam(ifile='gal_param.json'): """ Read Galaxy parameters Parameters ---------- ifile : str, optional Returns ------- gal_param : dict """ old_param = parse_json(os.path.join(DATA_PATH, ifile)) gal_param = {} gal_param['thick_disk'] = dict(e_density=(old_param['n1h1'] / old_param['h1']), height=old_param['h1'], radius=old_param['A1'], F=old_param['F1']) gal_param['thin_disk'] = dict(e_density=old_param['n2'], height=old_param['h2'], radius=old_param['A2'], F=old_param['F2']) return gal_param def read_gc(ifile='ne_gc.json'): """ Read Galactic Center parameters Returns ------- gc_param : dict dict of parameters """ old_param = parse_json(os.path.join(DATA_PATH, ifile)) gc_param = {} gc_param['galactic_center'] = dict(e_density=old_param['negc0'], center=tuple(old_param['centroid']. values()), F=old_param['Fgc0'], height=old_param['hgc'], radius=old_param['rgc']) return gc_param def read_lism(ifile='ne_lism.json'): """ Parameters ---------- ifile : str, optional Returns ------- lism_dict : dict """ # Read with open(os.path.join(DATA_PATH, ifile), 'rt') as fh: lism_dict = json.load(fh) # Return return lism_dict def init_spiral_arms(ifile='ne_arms_log_mod.inp'): armsinp = os.path.join(DATA_PATH, ifile) # logarms = DATA_PATH + 'log_arms.out' narms = 5 # integer armmap(5) ! for remapping from Wainscoat # data armmap/1, 3, 4, 2, 5/ ! order to TC93 order, which is # ! from GC outwards toward Sun. armmap = [1, 3, 4, 2, 5] NNj = [20, 20, 20, 20, 20] narmpoints = 500 ncoord = 2 NNmax = 20 rad = 180/np.pi # Arms arms_tbl = Table.read(armsinp, format='ascii') # a, rmin, thmin, extent assert len(arms_tbl) == narms r1 = np.zeros((NNmax, narms)) th1 = np.zeros((NNmax, narms)) kmax = np.zeros(narms).astype(int) arm = np.zeros((narms, narmpoints, ncoord)) for j, row in enumerate(arms_tbl): th1[0:NNj[j], j] = (row['thmin'] + np.arange(NNj[j])*row['extent']/(NNj[j]-1.)) # rad r1[:, j] = row['rmin'] * np.exp((th1[:, j]-row['thmin'])/row['a']) th1[:, j] *= rad # ! deg # c *** begin sculpting spiral arm 2 == TC arm 3*** if armmap[j] == 3: cut1 = (th1[:, j] > 370.) & (th1[:, j] <= 410.) r1[cut1, j] *= (1. + 0.04 * np.cos((th1[cut1, j]-390.)*180 / (40.*rad))) # c . (1. + 0.01*cos((th1(n,j)-390.)*180./(40.*rad))) cut2 = (th1[:, j] > 315.) & (th1[:, j] <= 370.) r1[cut2, j] *= (1. - 0.07 * np.cos((th1[cut2, j]-345.)*180 / (55.*rad))) # c . 
(1.0 - 0.08*cos((th1(n,j)-345.)*180./(55.*rad))) cut3 = (th1[:, j] > 180.) & (th1[:, j] <= 315.) r1[cut3, j] *= (1 + 0.16 * np.cos((th1[cut3, j]-260.)*180 / (135.*rad))) # (1 + 0.13* np.cos((th1[cut3,j]-260.)*180./(135.*rad))) # c *** begin sculpting spiral arm 4 == TC arm 2*** if armmap[j] == 2: cut1 = (th1[:, j] > 290.) & (th1[:, j] <= 395.) r1[cut1, j] *= (1. - 0.11 * np.cos((th1[cut1, j]-350.)*180 / (105.*rad))) # c *** end arm sculpting *** """ open(11,file=logarms, status='unknown') write(11,*) 'arm n xa ya' """ # do 21 j=1,narms for j in range(narms): dth = 5.0/r1[0, j] # Python indexing th = th1[0, j]-0.999*dth # Generate spline cspline = CubicSpline(th1[:NNj[j], j], r1[:NNj[j], j]) # call cspline(th1(1,j),r1(1,j),-NNj(j),th,r) # for k in range(narmpoints): # do 10 k=1,narmpoints-1 th = th + dth * np.arange(narmpoints) gd_th = np.where(th <= th1[NNj[j]-1, j])[0] kmax[j] = np.max(gd_th) + 1 # Python indexing (we will use arange) r = cspline(th[gd_th]) # x,y of each arm arm[j, gd_th, 0] = -r*np.sin(th[gd_th]/rad) # Python indexing arm[j, gd_th, 1] = r*np.cos(th[gd_th]/rad) # Wrap into a dict arms_dict = {} arms_dict['table'] = arms_tbl arms_dict['r1'] = r1 arms_dict['th1'] = th1 arms_dict['kmax'] = kmax arms_dict['narms'] = narms arms_dict['narmpoints'] = narmpoints arms_dict['armmap'] = armmap arms_dict['arm'] = arm return arms_dict
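`numpify_dict` above recursively replaces list values, including those inside nested dictionaries, with numpy arrays; this is how the JSON parameter files become array-valued `Params`. A minimal illustration with a made-up nested dict, assuming `numpify_dict` from the module above is in scope:

raw = {'thin_disk': {'heights': [1.0, 2.0, 3.0], 'label': 'disk'},
       'weights': [0.1, 0.2]}

params = numpify_dict(raw)

print(type(params['weights']))               # <class 'numpy.ndarray'>
print(type(params['thin_disk']['heights']))  # <class 'numpy.ndarray'>
print(params['thin_disk']['label'])          # non-list values are left unchanged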
benbaror/ne2001
src/ne2001/ne_io.py
Python
bsd-2-clause
6,520
[ "Galaxy" ]
e7d3ec9b1ffe058b0eb24759866fb6b2068410374337dae4d96b8e1b71a4afc9
"""Configuration dictionary for submitting jobs mode = queue # this defines whether jobs are immediately run or queued user.name = jkitchin user.email = jkitchin@andrew.cmu.edu queue.command = qsub queue.options = -joe queue.walltime = 168:00:00 queue.nodes = 1 queue.ppn = 1 queue.mem = 2GB queue.jobname = None check for $HOME/.jasprc then check for ./.jasprc Note that the environment variables VASP_SERIAL and VASP_PARALLEL can also be used to identify the vasp executables used by runjasp.py. """ import os # default settings JASPRC = {'vasp.executable.serial': '/home-research/zhongnanxu/opt/vasp-5.3.5/bin/vasp-vtst-beef-serial', 'vasp.executable.parallel': '/home-research/zhongnanxu/opt/vasp-5.3.5/bin/vasp-vtst-beef-parallel', 'mode': 'queue', # other value is 'run' 'queue.command': 'qsub', 'queue.options': '-joe', 'queue.walltime': '168:00:00', 'queue.nodes': 1, 'queue.ppn': 1, 'queue.mem': '2GB', 'queue.jobname': 'None', 'multiprocessing.cores_per_process': 'None', 'vdw_kernel.bindat': '/opt/kitchingroup/vasp-5.3.5/vdw_kernel.bindat', 'restart_unconverged': True } def read_configuration(fname): """Reads jasprc configuration from fname.""" f = open(fname) for line in f: line = line.strip() if line.startswith('#'): pass # comment elif line == '': pass else: if '#' in line: # take the part before the first # line = line.split('#')[0] key, value = line.split('=') JASPRC[key.strip()] = value.strip() # these are the possible paths to config files, in order of increasing # priority config_files = [os.path.join(os.environ['HOME'], '.jasprc'), '.jasprc'] for cf in config_files: if os.path.exists(cf): read_configuration(cf)
jkitchin/jasp
jasp/jasprc.py
Python
gpl-2.0
1,973
[ "VASP" ]
f3e63e90e764593fae4f9f7d3fb792771c93bd9a56a864bcf9e155db9e41d3b3
import pandas as pd from sklearn.ensemble import RandomForestRegressor from sklearn.ensemble import BaggingRegressor from sklearn.ensemble import ExtraTreesRegressor from sklearn.ensemble import AdaBoostRegressor from sklearn.ensemble import GradientBoostingRegressor from sklearn.ensemble import RandomTreesEmbedding from sklearn.neural_network import MLPRegressor from sklearn.linear_model import ElasticNet from sklearn.gaussian_process import GaussianProcessRegressor from sklearn.model_selection import train_test_split from sklearn.preprocessing import LabelEncoder from sklearn.preprocessing import Imputer from sklearn import metrics import numpy as np def get_gaussian_process_regressor(): gp = GaussianProcessRegressor() return [gp],['Gaussian Process'] def get_mlp_regressor(num_hidden_units=51): mlp = MLPRegressor(hidden_layer_sizes=num_hidden_units) return [mlp],['Multi-Layer Perceptron'] def get_ensemble_models(): rf = RandomForestRegressor(n_estimators=51,min_samples_leaf=5,min_samples_split=3,random_state=42) bag = BaggingRegressor(n_estimators=51,random_state=42) extra = ExtraTreesRegressor(n_estimators=71,random_state=42) ada = AdaBoostRegressor(random_state=42) grad = GradientBoostingRegressor(n_estimators=101,random_state=42) classifier_list = [rf,bag,extra,ada,grad] classifier_name_list = ['Random Forests','Bagging','Extra Trees','AdaBoost','Gradient Boost'] return classifier_list, classifier_name_list def get_linear_model(): elastic_net = ElasticNet() return [elastic_net],['Elastic Net'] def print_evaluation_metrics(trained_model,trained_model_name,X_test,y_test): print '--------- For Model : ', trained_model_name ,' ---------\n' predicted_values = trained_model.predict(X_test) print "Mean Absolute Error : ", metrics.mean_absolute_error(y_test,predicted_values) print "Median Absolute Error : ", metrics.median_absolute_error(y_test,predicted_values) print "Mean Squared Error : ", metrics.mean_squared_error(y_test,predicted_values) print "R2 Score : ", metrics.r2_score(y_test,predicted_values) print "---------------------------------------\n" def label_encode_frame(dataframe): columns = dataframe.columns encoder = LabelEncoder() for column in columns: if type(dataframe[column][0]) is np.nan: for i in range(len(dataframe)): if i > 1000: break if type(dataframe[column][i]) is str: dataframe[column] = encoder.fit_transform(dataframe[column].values) break elif type(dataframe[column][0]) is str: dataframe[column] = encoder.fit_transform(dataframe[column].values) return dataframe filename = 'train.csv' merc_frame = pd.read_csv(filename) regression_values = list(merc_frame['y'].values) del merc_frame['ID'] del merc_frame['y'] encoded_frame = label_encode_frame(merc_frame) X_train,X_test,y_train,y_test = train_test_split(encoded_frame.values,regression_values,test_size=0.1,random_state=42) regressor_list,regressor_name_list = get_ensemble_models() for regressor,regressor_name in zip(regressor_list,regressor_name_list): regressor.fit(X_train,y_train) print_evaluation_metrics(regressor,regressor_name,X_test,y_test)
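The script above is tied to Kaggle's `train.csv`, but its helper functions can be exercised on synthetic data. A small sketch with random regression data (purely illustrative, assuming the functions defined above are in scope):

import numpy as np
from sklearn.model_selection import train_test_split

rng = np.random.RandomState(42)
X = rng.normal(size=(200, 5))
y = 2.0 * X[:, 0] + rng.normal(scale=0.1, size=200)

X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.2, random_state=42)

# get_ensemble_models() returns (model_list, name_list); fit and report each one.
for model, name in zip(*get_ensemble_models()):
    model.fit(X_tr, y_tr)
    print_evaluation_metrics(model, name, X_te, y_te)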
rupakc/Kaggle-Compendium
Mercedez-Benz Greener Manufacturing/merc-baseline.py
Python
mit
3,323
[ "Gaussian" ]
f36251c572857f3662f0937dbdf487b8fff5b3256ada6bab40682fa26502904e
#! /usr/bin/python # -*- coding: utf8 -*- import tensorflow as tf import time from . import visualize from . import utils from . import files from . import cost from . import iterate from . import ops import numpy as np from six.moves import xrange import random, warnings import copy import inspect # __all__ = [ # "Layer", # "DenseLayer", # ] # set_keep = locals() set_keep = globals() set_keep['_layers_name_list'] =[] set_keep['name_reuse'] = False try: # For TF12 and later TF_GRAPHKEYS_VARIABLES = tf.GraphKeys.GLOBAL_VARIABLES except: # For TF11 and before TF_GRAPHKEYS_VARIABLES = tf.GraphKeys.VARIABLES ## Variable Operation def flatten_reshape(variable, name=''): """Reshapes high-dimension input to a vector. [batch_size, mask_row, mask_col, n_mask] ---> [batch_size, mask_row * mask_col * n_mask] Parameters ---------- variable : a tensorflow variable name : a string or None An optional name to attach to this layer. Examples -------- >>> W_conv2 = weight_variable([5, 5, 100, 32]) # 64 features for each 5x5 patch >>> b_conv2 = bias_variable([32]) >>> W_fc1 = weight_variable([7 * 7 * 32, 256]) >>> h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2) >>> h_pool2 = max_pool_2x2(h_conv2) >>> h_pool2.get_shape()[:].as_list() = [batch_size, 7, 7, 32] ... [batch_size, mask_row, mask_col, n_mask] >>> h_pool2_flat = tl.layers.flatten_reshape(h_pool2) ... [batch_size, mask_row * mask_col * n_mask] >>> h_pool2_flat_drop = tf.nn.dropout(h_pool2_flat, keep_prob) ... """ dim = 1 for d in variable.get_shape()[1:].as_list(): dim *= d return tf.reshape(variable, shape=[-1, dim], name=name) def clear_layers_name(): """Clear all layer names in set_keep['_layers_name_list'], enable layer name reuse. Examples --------- >>> network = tl.layers.InputLayer(x, name='input_layer') >>> network = tl.layers.DenseLayer(network, n_units=800, name='relu1') ... >>> tl.layers.clear_layers_name() >>> network2 = tl.layers.InputLayer(x, name='input_layer') >>> network2 = tl.layers.DenseLayer(network2, n_units=800, name='relu1') ... """ set_keep['_layers_name_list'] =[] def set_name_reuse(enable=True): """Enable or disable reuse layer name. By default, each layer must has unique name. When you want two or more input placeholder (inference) share the same model parameters, you need to enable layer name reuse, then allow the parameters have same name scope. Parameters ------------ enable : boolean, enable name reuse. (None means False). Examples ------------ >>> def embed_seq(input_seqs, is_train, reuse): >>> with tf.variable_scope("model", reuse=reuse): >>> tl.layers.set_name_reuse(reuse) >>> network = tl.layers.EmbeddingInputlayer( ... inputs = input_seqs, ... vocabulary_size = vocab_size, ... embedding_size = embedding_size, ... name = 'e_embedding') >>> network = tl.layers.DynamicRNNLayer(network, ... cell_fn = tf.nn.rnn_cell.BasicLSTMCell, ... n_hidden = embedding_size, ... dropout = (0.7 if is_train else None), ... initializer = w_init, ... sequence_length = tl.layers.retrieve_seq_length_op2(input_seqs), ... return_last = True, ... name = 'e_dynamicrnn',) >>> return network >>> >>> net_train = embed_seq(t_caption, is_train=True, reuse=False) >>> net_test = embed_seq(t_caption, is_train=False, reuse=True) - see ``tutorial_ptb_lstm.py`` for example. """ set_keep['name_reuse'] = enable def initialize_rnn_state(state): """Return the initialized RNN state. The input is LSTMStateTuple or State of RNNCells. Parameters ----------- state : a RNN state. 
""" try: # TF1.0 LSTMStateTuple = tf.contrib.rnn.LSTMStateTuple except: LSTMStateTuple = tf.nn.rnn_cell.LSTMStateTuple if isinstance(state, LSTMStateTuple): c = state.c.eval() h = state.h.eval() return (c, h) else: new_state = state.eval() return new_state def print_all_variables(train_only=False): """Print all trainable and non-trainable variables without tl.layers.initialize_global_variables(sess) Parameters ---------- train_only : boolean If True, only print the trainable variables, otherwise, print all variables. """ # tvar = tf.trainable_variables() if train_only else tf.all_variables() if train_only: t_vars = tf.trainable_variables() print(" [*] printing trainable variables") else: try: # TF1.0 t_vars = tf.global_variables() except: # TF0.12 t_vars = tf.all_variables() print(" [*] printing global variables") for idx, v in enumerate(t_vars): print(" var {:3}: {:15} {}".format(idx, str(v.get_shape()), v.name)) def get_variables_with_name(name, train_only=True, printable=False): """Get variable list by a given name scope. Examples --------- >>> dense_vars = tl.layers.get_variable_with_name('dense', True, True) """ print(" [*] geting variables with %s" % name) # tvar = tf.trainable_variables() if train_only else tf.all_variables() if train_only: t_vars = tf.trainable_variables() else: try: # TF1.0 t_vars = tf.global_variables() except: # TF0.12 t_vars = tf.all_variables() d_vars = [var for var in t_vars if name in var.name] if printable: for idx, v in enumerate(d_vars): print(" got {:3}: {:15} {}".format(idx, v.name, str(v.get_shape()))) return d_vars def get_layers_with_name(network=None, name="", printable=False): """Get layer list in a network by a given name scope. Examples --------- >>> layers = tl.layers.get_layers_with_name(network, "CNN", True) """ assert network is not None print(" [*] geting layers with %s" % name) layers = [] i = 0 for layer in network.all_layers: # print(type(layer.name)) if name in layer.name: layers.append(layer) if printable: # print(layer.name) print(" got {:3}: {:15} {}".format(i, layer.name, str(layer.get_shape()))) i = i + 1 return layers def list_remove_repeat(l=None): """Remove the repeated items in a list, and return the processed list. You may need it to create merged layer like Concat, Elementwise and etc. Parameters ---------- l : a list Examples --------- >>> l = [2, 3, 4, 2, 3] >>> l = list_remove_repeat(l) ... [2, 3, 4] """ l2 = [] [l2.append(i) for i in l if not i in l2] return l2 def initialize_global_variables(sess=None): """Excute ``sess.run(tf.global_variables_initializer())`` for TF12+ or sess.run(tf.initialize_all_variables()) for TF11. Parameters ---------- sess : a Session """ assert sess is not None try: # TF12 sess.run(tf.global_variables_initializer()) except: # TF11 sess.run(tf.initialize_all_variables()) ## Basic layer class Layer(object): """ The :class:`Layer` class represents a single layer of a neural network. It should be subclassed when implementing new types of layers. Because each layer can keep track of the layer(s) feeding into it, a network's output :class:`Layer` instance can double as a handle to the full network. Parameters ---------- inputs : a :class:`Layer` instance The `Layer` class feeding into this layer. name : a string or None An optional name to attach to this layer. 
""" def __init__( self, inputs = None, name ='layer' ): self.inputs = inputs scope_name=tf.get_variable_scope().name if scope_name: name = scope_name + '/' + name if (name in set_keep['_layers_name_list']) and name_reuse == False: raise Exception("Layer '%s' already exists, please choice other 'name' or reuse this layer\ \nHint : Use different name for different 'Layer' (The name is used to control parameter sharing)" % name) else: self.name = name if name not in ['', None, False]: set_keep['_layers_name_list'].append(name) def print_params(self, details=True): ''' Print all info of parameters in the network''' for i, p in enumerate(self.all_params): if details: try: # print(" param {:3}: {:15} (mean: {:<18}, median: {:<18}, std: {:<18}) {}".format(i, str(p.eval().shape), p.eval().mean(), np.median(p.eval()), p.eval().std(), p.name)) val = p.eval() print(" param {:3}: {:20} {:15} {} (mean: {:<18}, median: {:<18}, std: {:<18}) ".format(i, p.name, str(val.shape), p.dtype.name, val.mean(), np.median(val), val.std())) except Exception as e: print(str(e)) raise Exception("Hint: print params details after tl.layers.initialize_global_variables(sess) or use network.print_params(False).") else: print(" param {:3}: {:20} {:15} {}".format(i, p.name, str(p.get_shape()), p.dtype.name)) print(" num of params: %d" % self.count_params()) def print_layers(self): ''' Print all info of layers in the network ''' for i, layer in enumerate(self.all_layers): # print(" layer %d: %s" % (i, str(layer))) print(" layer {:3}: {:20} {:15} {}".format(i, layer.name, str(layer.get_shape()), layer.dtype.name)) def count_params(self): ''' Return the number of parameters in the network ''' n_params = 0 for i, p in enumerate(self.all_params): n = 1 # for s in p.eval().shape: for s in p.get_shape(): try: s = int(s) except: s = 1 if s: n = n * s n_params = n_params + n return n_params def __str__(self): # print("\nIt is a Layer class") # self.print_params(False) # self.print_layers() return " Last layer is: %s" % self.__class__.__name__ ## Input layer class InputLayer(Layer): """ The :class:`InputLayer` class is the starting layer of a neural network. Parameters ---------- inputs : a placeholder or tensor The input tensor data. name : a string or None An optional name to attach to this layer. """ def __init__( self, inputs = None, name ='input_layer' ): Layer.__init__(self, inputs=inputs, name=name) print(" [TL] InputLayer %s: %s" % (self.name, inputs.get_shape())) self.outputs = inputs self.all_layers = [] self.all_params = [] self.all_drop = {} ## OneHot layer class OneHotInputLayer(Layer): """ The :class:`OneHotInputLayer` class is the starting layer of a neural network, see ``tf.one_hot``. Parameters ---------- inputs : a placeholder or tensor The input tensor data. name : a string or None An optional name to attach to this layer. depth : If the input indices is rank N, the output will have rank N+1. The new axis is created at dimension axis (default: the new axis is appended at the end). on_value : If on_value is not provided, it will default to the value 1 with type dtype. default, None off_value : If off_value is not provided, it will default to the value 0 with type dtype. 
default, None axis : default, None dtype : default, None """ def __init__( self, inputs = None, depth = None, on_value = None, off_value = None, axis = None, dtype=None, name ='input_layer' ): Layer.__init__(self, inputs=inputs, name=name) assert depth != None, "depth is not given" print(" [TL]:Instantiate OneHotInputLayer %s: %s" % (self.name, inputs.get_shape())) self.outputs = tf.one_hot(inputs, depth, on_value=on_value, off_value=off_value, axis=axis, dtype=dtype) self.all_layers = [] self.all_params = [] self.all_drop = {} ## Word Embedding Input layer class Word2vecEmbeddingInputlayer(Layer): """ The :class:`Word2vecEmbeddingInputlayer` class is a fully connected layer, for Word Embedding. Words are input as integer index. The output is the embedded word vector. Parameters ---------- inputs : placeholder For word inputs. integer index format. train_labels : placeholder For word labels. integer index format. vocabulary_size : int The size of vocabulary, number of words. embedding_size : int The number of embedding dimensions. num_sampled : int The Number of negative examples for NCE loss. nce_loss_args : a dictionary The arguments for tf.nn.nce_loss() E_init : embedding initializer The initializer for initializing the embedding matrix. E_init_args : a dictionary The arguments for embedding initializer nce_W_init : NCE decoder biases initializer The initializer for initializing the nce decoder weight matrix. nce_W_init_args : a dictionary The arguments for initializing the nce decoder weight matrix. nce_b_init : NCE decoder biases initializer The initializer for tf.get_variable() of the nce decoder bias vector. nce_b_init_args : a dictionary The arguments for tf.get_variable() of the nce decoder bias vector. name : a string or None An optional name to attach to this layer. Variables -------------- nce_cost : a tensor The NCE loss. outputs : a tensor The outputs of embedding layer. normalized_embeddings : tensor Normalized embedding matrix Examples -------- - Without TensorLayer : see tensorflow/examples/tutorials/word2vec/word2vec_basic.py >>> train_inputs = tf.placeholder(tf.int32, shape=[batch_size]) >>> train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1]) >>> embeddings = tf.Variable( ... tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0)) >>> embed = tf.nn.embedding_lookup(embeddings, train_inputs) >>> nce_weights = tf.Variable( ... tf.truncated_normal([vocabulary_size, embedding_size], ... stddev=1.0 / math.sqrt(embedding_size))) >>> nce_biases = tf.Variable(tf.zeros([vocabulary_size])) >>> cost = tf.reduce_mean( ... tf.nn.nce_loss(weights=nce_weights, biases=nce_biases, ... inputs=embed, labels=train_labels, ... num_sampled=num_sampled, num_classes=vocabulary_size, ... num_true=1)) - With TensorLayer : see tutorial_word2vec_basic.py >>> train_inputs = tf.placeholder(tf.int32, shape=[batch_size]) >>> train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1]) >>> emb_net = tl.layers.Word2vecEmbeddingInputlayer( ... inputs = train_inputs, ... train_labels = train_labels, ... vocabulary_size = vocabulary_size, ... embedding_size = embedding_size, ... num_sampled = num_sampled, ... name ='word2vec_layer', ... ) >>> cost = emb_net.nce_cost >>> train_params = emb_net.all_params >>> train_op = tf.train.GradientDescentOptimizer(learning_rate).minimize( ... 
cost, var_list=train_params) >>> normalized_embeddings = emb_net.normalized_embeddings References ---------- - `tensorflow/examples/tutorials/word2vec/word2vec_basic.py <https://github.com/tensorflow/tensorflow/blob/r0.7/tensorflow/examples/tutorials/word2vec/word2vec_basic.py>`_ """ def __init__( self, inputs = None, train_labels = None, vocabulary_size = 80000, embedding_size = 200, num_sampled = 64, nce_loss_args = {}, E_init = tf.random_uniform_initializer(minval=-1.0, maxval=1.0), E_init_args = {}, nce_W_init = tf.truncated_normal_initializer(stddev=0.03), nce_W_init_args = {}, nce_b_init = tf.constant_initializer(value=0.0), nce_b_init_args = {}, name ='word2vec_layer', ): Layer.__init__(self, name=name) self.inputs = inputs print(" [TL] Word2vecEmbeddingInputlayer %s: (%d, %d)" % (self.name, vocabulary_size, embedding_size)) # Look up embeddings for inputs. # Note: a row of 'embeddings' is the vector representation of a word. # for the sake of speed, it is better to slice the embedding matrix # instead of transfering a word id to one-hot-format vector and then # multiply by the embedding matrix. # embed is the outputs of the hidden layer (embedding layer), it is a # row vector with 'embedding_size' values. with tf.variable_scope(name) as vs: embeddings = tf.get_variable(name='embeddings', shape=(vocabulary_size, embedding_size), initializer=E_init, **E_init_args) embed = tf.nn.embedding_lookup(embeddings, self.inputs) # Construct the variables for the NCE loss (i.e. negative sampling) nce_weights = tf.get_variable(name='nce_weights', shape=(vocabulary_size, embedding_size), initializer=nce_W_init, **nce_W_init_args) nce_biases = tf.get_variable(name='nce_biases', shape=(vocabulary_size), initializer=nce_b_init, **nce_b_init_args) # Compute the average NCE loss for the batch. # tf.nce_loss automatically draws a new sample of the negative labels # each time we evaluate the loss. self.nce_cost = tf.reduce_mean( tf.nn.nce_loss(weights=nce_weights, biases=nce_biases, inputs=embed, labels=train_labels, num_sampled=num_sampled, num_classes=vocabulary_size, **nce_loss_args)) self.outputs = embed self.normalized_embeddings = tf.nn.l2_normalize(embeddings, 1) self.all_layers = [self.outputs] self.all_params = [embeddings, nce_weights, nce_biases] self.all_drop = {} class EmbeddingInputlayer(Layer): """ The :class:`EmbeddingInputlayer` class is a fully connected layer, for Word Embedding. Words are input as integer index. The output is the embedded word vector. If you have a pre-train matrix, you can assign the matrix into it. To train a word embedding matrix, you can used class:`Word2vecEmbeddingInputlayer`. Note that, do not update this embedding matrix. Parameters ---------- inputs : placeholder For word inputs. integer index format. a 2D tensor : [batch_size, num_steps(num_words)] vocabulary_size : int The size of vocabulary, number of words. embedding_size : int The number of embedding dimensions. E_init : embedding initializer The initializer for initializing the embedding matrix. E_init_args : a dictionary The arguments for embedding initializer name : a string or None An optional name to attach to this layer. Variables ------------ outputs : a tensor The outputs of embedding layer. the outputs 3D tensor : [batch_size, num_steps(num_words), embedding_size] Examples -------- >>> vocabulary_size = 50000 >>> embedding_size = 200 >>> model_file_name = "model_word2vec_50k_200" >>> batch_size = None ... 
>>> all_var = tl.files.load_npy_to_any(name=model_file_name+'.npy') >>> data = all_var['data']; count = all_var['count'] >>> dictionary = all_var['dictionary'] >>> reverse_dictionary = all_var['reverse_dictionary'] >>> tl.files.save_vocab(count, name='vocab_'+model_file_name+'.txt') >>> del all_var, data, count ... >>> load_params = tl.files.load_npz(name=model_file_name+'.npz') >>> x = tf.placeholder(tf.int32, shape=[batch_size]) >>> y_ = tf.placeholder(tf.int32, shape=[batch_size, 1]) >>> emb_net = tl.layers.EmbeddingInputlayer( ... inputs = x, ... vocabulary_size = vocabulary_size, ... embedding_size = embedding_size, ... name ='embedding_layer') >>> tl.layers.initialize_global_variables(sess) >>> tl.files.assign_params(sess, [load_params[0]], emb_net) >>> word = b'hello' >>> word_id = dictionary[word] >>> print('word_id:', word_id) ... 6428 ... >>> words = [b'i', b'am', b'hao', b'dong'] >>> word_ids = tl.files.words_to_word_ids(words, dictionary) >>> context = tl.files.word_ids_to_words(word_ids, reverse_dictionary) >>> print('word_ids:', word_ids) ... [72, 1226, 46744, 20048] >>> print('context:', context) ... [b'i', b'am', b'hao', b'dong'] ... >>> vector = sess.run(emb_net.outputs, feed_dict={x : [word_id]}) >>> print('vector:', vector.shape) ... (1, 200) >>> vectors = sess.run(emb_net.outputs, feed_dict={x : word_ids}) >>> print('vectors:', vectors.shape) ... (4, 200) """ def __init__( self, inputs = None, vocabulary_size = 80000, embedding_size = 200, E_init = tf.random_uniform_initializer(-0.1, 0.1), E_init_args = {}, name ='embedding_layer', ): Layer.__init__(self, name=name) self.inputs = inputs print(" [TL] EmbeddingInputlayer %s: (%d, %d)" % (self.name, vocabulary_size, embedding_size)) with tf.variable_scope(name) as vs: embeddings = tf.get_variable(name='embeddings', shape=(vocabulary_size, embedding_size), initializer=E_init, **E_init_args) embed = tf.nn.embedding_lookup(embeddings, self.inputs) self.outputs = embed self.all_layers = [self.outputs] self.all_params = [embeddings] self.all_drop = {} ## Dense layer class DenseLayer(Layer): """ The :class:`DenseLayer` class is a fully connected layer. Parameters ---------- layer : a :class:`Layer` instance The `Layer` class feeding into this layer. n_units : int The number of units of the layer. act : activation function The function that is applied to the layer activations. W_init : weights initializer The initializer for initializing the weight matrix. b_init : biases initializer or None The initializer for initializing the bias vector. If None, skip biases. W_init_args : dictionary The arguments for the weights tf.get_variable. b_init_args : dictionary The arguments for the biases tf.get_variable. name : a string or None An optional name to attach to this layer. Examples -------- >>> network = tl.layers.InputLayer(x, name='input_layer') >>> network = tl.layers.DenseLayer( ... network, ... n_units=800, ... act = tf.nn.relu, ... W_init=tf.truncated_normal_initializer(stddev=0.1), ... name ='relu_layer' ... ) >>> Without TensorLayer, you can do as follow. >>> W = tf.Variable( ... tf.random_uniform([n_in, n_units], -1.0, 1.0), name='W') >>> b = tf.Variable(tf.zeros(shape=[n_units]), name='b') >>> y = tf.nn.relu(tf.matmul(inputs, W) + b) Notes ----- If the input to this layer has more than two axes, it need to flatten the input by using :class:`FlattenLayer` in this case. 
""" def __init__( self, layer = None, n_units = 100, act = tf.identity, W_init = tf.truncated_normal_initializer(stddev=0.1), b_init = tf.constant_initializer(value=0.0), W_init_args = {}, b_init_args = {}, name ='dense_layer', ): Layer.__init__(self, name=name) self.inputs = layer.outputs if self.inputs.get_shape().ndims != 2: raise Exception("The input dimension must be rank 2, please reshape or flatten it") n_in = int(self.inputs.get_shape()[-1]) self.n_units = n_units print(" [TL] DenseLayer %s: %d %s" % (self.name, self.n_units, act.__name__)) with tf.variable_scope(name) as vs: W = tf.get_variable(name='W', shape=(n_in, n_units), initializer=W_init, **W_init_args ) if b_init is not None: try: b = tf.get_variable(name='b', shape=(n_units), initializer=b_init, **b_init_args ) except: # If initializer is a constant, do not specify shape. b = tf.get_variable(name='b', initializer=b_init, **b_init_args ) self.outputs = act(tf.matmul(self.inputs, W) + b) else: self.outputs = act(tf.matmul(self.inputs, W)) # Hint : list(), dict() is pass by value (shallow), without them, it is # pass by reference. self.all_layers = list(layer.all_layers) self.all_params = list(layer.all_params) self.all_drop = dict(layer.all_drop) self.all_layers.extend( [self.outputs] ) if b_init is not None: self.all_params.extend( [W, b] ) else: self.all_params.extend( [W] ) class ReconLayer(DenseLayer): """ The :class:`ReconLayer` class is a reconstruction layer `DenseLayer` which use to pre-train a `DenseLayer`. Parameters ---------- layer : a :class:`Layer` instance The `Layer` class feeding into this layer. x_recon : tensorflow variable The variables used for reconstruction. name : a string or None An optional name to attach to this layer. n_units : int The number of units of the layer, should be equal to x_recon act : activation function The activation function that is applied to the reconstruction layer. Normally, for sigmoid layer, the reconstruction activation is sigmoid; for rectifying layer, the reconstruction activation is softplus. Examples -------- >>> network = tl.layers.InputLayer(x, name='input_layer') >>> network = tl.layers.DenseLayer(network, n_units=196, ... act=tf.nn.sigmoid, name='sigmoid1') >>> recon_layer1 = tl.layers.ReconLayer(network, x_recon=x, n_units=784, ... act=tf.nn.sigmoid, name='recon_layer1') >>> recon_layer1.pretrain(sess, x=x, X_train=X_train, X_val=X_val, ... denoise_name=None, n_epoch=1200, batch_size=128, ... print_freq=10, save=True, save_name='w1pre_') Methods ------- pretrain(self, sess, x, X_train, X_val, denoise_name=None, n_epoch=100, batch_size=128, print_freq=10, save=True, save_name='w1pre_') Start to pre-train the parameters of previous DenseLayer. Notes ----- The input layer should be `DenseLayer` or a layer has only one axes. You may need to modify this part to define your own cost function. By default, the cost is implemented as follow: - For sigmoid layer, the implementation can be `UFLDL <http://deeplearning.stanford.edu/wiki/index.php/UFLDL_Tutorial>`_ - For rectifying layer, the implementation can be `Glorot (2011). 
Deep Sparse Rectifier Neural Networks <http://doi.org/10.1.1.208.6449>`_ """ def __init__( self, layer = None, x_recon = None, name = 'recon_layer', n_units = 784, act = tf.nn.softplus, ): DenseLayer.__init__(self, layer=layer, n_units=n_units, act=act, name=name) print(" [TL] %s is a ReconLayer" % self.name) # y : reconstruction outputs; train_params : parameters to train # Note that: train_params = [W_encoder, b_encoder, W_decoder, b_encoder] y = self.outputs self.train_params = self.all_params[-4:] # ===================================================================== # # You need to modify the below cost function and optimizer so as to # implement your own pre-train method. # # ===================================================================== lambda_l2_w = 0.004 learning_rate = 0.0001 print(" lambda_l2_w: %f" % lambda_l2_w) print(" learning_rate: %f" % learning_rate) # Mean-squre-error i.e. quadratic-cost mse = tf.reduce_sum(tf.squared_difference(y, x_recon), 1) mse = tf.reduce_mean(mse) # in theano: mse = ((y - x) ** 2 ).sum(axis=1).mean() # mse = tf.reduce_mean(tf.reduce_sum(tf.square(tf.sub(y, x_recon)), 1)) # mse = tf.reduce_mean(tf.squared_difference(y, x_recon)) # <haodong>: Error # mse = tf.sqrt(tf.reduce_mean(tf.square(y - x_recon))) # <haodong>: Error # Cross-entropy # ce = cost.cross_entropy(y, x_recon) # <haodong>: list , list , Error (only be used for softmax output) # ce = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(y, x_recon)) # <haodong>: list , list , Error (only be used for softmax output) # ce = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(y, x_recon)) # <haodong>: list , index , Error (only be used for softmax output) L2_w = tf.contrib.layers.l2_regularizer(lambda_l2_w)(self.train_params[0]) \ + tf.contrib.layers.l2_regularizer(lambda_l2_w)(self.train_params[2]) # faster than the code below # L2_w = lambda_l2_w * tf.reduce_mean(tf.square(self.train_params[0])) + lambda_l2_w * tf.reduce_mean( tf.square(self.train_params[2])) # DropNeuro P_o = cost.lo_regularizer(0.03)(self.train_params[0]) # + cost.lo_regularizer(0.5)(self.train_params[2]) # <haodong>: if add lo on decoder, no neuron will be broken P_i = cost.li_regularizer(0.03)(self.train_params[0]) # + cost.li_regularizer(0.001)(self.train_params[2]) # L1 of activation outputs activation_out = self.all_layers[-2] L1_a = 0.001 * tf.reduce_mean(activation_out) # <haodong>: theano: T.mean( self.a[i] ) # some neuron are broken, white and black # L1_a = 0.001 * tf.reduce_mean( tf.reduce_sum(activation_out, 0) ) # <haodong>: some neuron are broken, white and black # L1_a = 0.001 * 100 * tf.reduce_mean( tf.reduce_sum(activation_out, 1) ) # <haodong>: some neuron are broken, white and black # KL Divergence beta = 4 rho = 0.15 p_hat = tf.reduce_mean(activation_out, 0) # theano: p_hat = T.mean( self.a[i], axis=0 ) try: ## TF1.0 KLD = beta * tf.reduce_sum( rho * tf.log(tf.divide(rho, p_hat)) + (1- rho) * tf.log((1- rho)/ (tf.subtract(float(1), p_hat))) ) except: ## TF0.12 KLD = beta * tf.reduce_sum( rho * tf.log(tf.div(rho, p_hat)) + (1- rho) * tf.log((1- rho)/ (tf.sub(float(1), p_hat))) ) # KLD = beta * tf.reduce_sum( rho * tf.log(rho/ p_hat) + (1- rho) * tf.log((1- rho)/(1- p_hat)) ) # theano: L1_a = l1_a[i] * T.sum( rho[i] * T.log(rho[i]/ p_hat) + (1- rho[i]) * T.log((1- rho[i])/(1- p_hat)) ) # Total cost if act == tf.nn.softplus: print(' use: mse, L2_w, L1_a') self.cost = mse + L1_a + L2_w elif act == tf.nn.sigmoid: # ---------------------------------------------------- # Cross-entropy 
was used in Denoising AE # print(' use: ce, L2_w, KLD') # self.cost = ce + L2_w + KLD # ---------------------------------------------------- # Mean-squared-error was used in Vanilla AE print(' use: mse, L2_w, KLD') self.cost = mse + L2_w + KLD # ---------------------------------------------------- # Add DropNeuro penalty (P_o) can remove neurons of AE # print(' use: mse, L2_w, KLD, P_o') # self.cost = mse + L2_w + KLD + P_o # ---------------------------------------------------- # Add DropNeuro penalty (P_i) can remove neurons of previous layer # If previous layer is InputLayer, it means remove useless features # print(' use: mse, L2_w, KLD, P_i') # self.cost = mse + L2_w + KLD + P_i else: raise Exception("Don't support the given reconstruct activation function") self.train_op = tf.train.AdamOptimizer(learning_rate, beta1=0.9, beta2=0.999, epsilon=1e-08, use_locking=False).minimize(self.cost, var_list=self.train_params) # self.train_op = tf.train.GradientDescentOptimizer(1.0).minimize(self.cost, var_list=self.train_params) def pretrain(self, sess, x, X_train, X_val, denoise_name=None, n_epoch=100, batch_size=128, print_freq=10, save=True, save_name='w1pre_'): # ==================================================== # # You need to modify the cost function in __init__() so as to # get your own pre-train method. # # ==================================================== print(" [*] %s start pretrain" % self.name) print(" batch_size: %d" % batch_size) if denoise_name: print(" denoising layer keep: %f" % self.all_drop[set_keep[denoise_name]]) dp_denoise = self.all_drop[set_keep[denoise_name]] else: print(" no denoising layer") for epoch in range(n_epoch): start_time = time.time() for X_train_a, _ in iterate.minibatches(X_train, X_train, batch_size, shuffle=True): dp_dict = utils.dict_to_one( self.all_drop ) if denoise_name: dp_dict[set_keep[denoise_name]] = dp_denoise feed_dict = {x: X_train_a} feed_dict.update(dp_dict) sess.run(self.train_op, feed_dict=feed_dict) if epoch + 1 == 1 or (epoch + 1) % print_freq == 0: print("Epoch %d of %d took %fs" % (epoch + 1, n_epoch, time.time() - start_time)) train_loss, n_batch = 0, 0 for X_train_a, _ in iterate.minibatches(X_train, X_train, batch_size, shuffle=True): dp_dict = utils.dict_to_one( self.all_drop ) feed_dict = {x: X_train_a} feed_dict.update(dp_dict) err = sess.run(self.cost, feed_dict=feed_dict) train_loss += err n_batch += 1 print(" train loss: %f" % (train_loss/ n_batch)) val_loss, n_batch = 0, 0 for X_val_a, _ in iterate.minibatches(X_val, X_val, batch_size, shuffle=True): dp_dict = utils.dict_to_one( self.all_drop ) feed_dict = {x: X_val_a} feed_dict.update(dp_dict) err = sess.run(self.cost, feed_dict=feed_dict) val_loss += err n_batch += 1 print(" val loss: %f" % (val_loss/ n_batch)) if save: try: visualize.W(self.train_params[0].eval(), second=10, saveable=True, shape=[28,28], name=save_name+str(epoch+1), fig_idx=2012) files.save_npz([self.all_params[0]] , name=save_name+str(epoch+1)+'.npz') except: raise Exception("You should change the visualize.W() in ReconLayer.pretrain(), if you want to save the feature images for different dataset") ## Noise layer class DropoutLayer(Layer): """ The :class:`DropoutLayer` class is a noise layer which randomly set some values to zero by a given keeping probability. Parameters ---------- layer : a :class:`Layer` instance The `Layer` class feeding into this layer. keep : float The keeping probability, the lower more values will be set to zero. 
is_fix : boolean Default False, if True, the keeping probability is fixed and cannot be changed via feed_dict. is_train : boolean If False, skip this layer, default is True. seed : int or None An integer or None to create random seed. name : a string or None An optional name to attach to this layer. Examples -------- - Define network >>> network = tl.layers.InputLayer(x, name='input_layer') >>> network = tl.layers.DropoutLayer(network, keep=0.8, name='drop1') >>> network = tl.layers.DenseLayer(network, n_units=800, act = tf.nn.relu, name='relu1') >>> ... - For training, enable dropout as follow. >>> feed_dict = {x: X_train_a, y_: y_train_a} >>> feed_dict.update( network.all_drop ) # enable noise layers >>> sess.run(train_op, feed_dict=feed_dict) >>> ... - For testing, disable dropout as follow. >>> dp_dict = tl.utils.dict_to_one( network.all_drop ) # disable noise layers >>> feed_dict = {x: X_val_a, y_: y_val_a} >>> feed_dict.update(dp_dict) >>> err, ac = sess.run([cost, acc], feed_dict=feed_dict) >>> ... Notes ------- - A frequent question regarding :class:`DropoutLayer` is that why it donot have `is_train` like :class:`BatchNormLayer`. In many simple cases, user may find it is better to use one inference instead of two inferences for training and testing seperately, :class:`DropoutLayer` allows you to control the dropout rate via `feed_dict`. However, you can fix the keeping probability by setting `is_fix` to True. """ def __init__( self, layer = None, keep = 0.5, is_fix = False, is_train = True, seed = None, name = 'dropout_layer', ): Layer.__init__(self, name=name) if is_train is False: print(" [TL] skip DropoutLayer") self.outputs = layer.outputs self.all_layers = list(layer.all_layers) self.all_params = list(layer.all_params) self.all_drop = dict(layer.all_drop) else: self.inputs = layer.outputs print(" [TL] DropoutLayer %s: keep:%f is_fix:%s" % (self.name, keep, is_fix)) # The name of placeholder for keep_prob is the same with the name # of the Layer. if is_fix: self.outputs = tf.nn.dropout(self.inputs, keep, seed=seed, name=name) else: set_keep[name] = tf.placeholder(tf.float32) self.outputs = tf.nn.dropout(self.inputs, set_keep[name], seed=seed, name=name) # 1.2 self.all_layers = list(layer.all_layers) self.all_params = list(layer.all_params) self.all_drop = dict(layer.all_drop) if is_fix is False: self.all_drop.update( {set_keep[name]: keep} ) self.all_layers.extend( [self.outputs] ) # print(set_keep[name]) # Tensor("Placeholder_2:0", dtype=float32) # print(denoising1) # Tensor("Placeholder_2:0", dtype=float32) # print(self.all_drop[denoising1]) # 0.8 # # https://www.tensorflow.org/versions/r0.8/tutorials/mnist/tf/index.html # The optional feed_dict argument allows the caller to override the # value of tensors in the graph. Each key in feed_dict can be one of # the following types: # If the key is a Tensor, the value may be a Python scalar, string, # list, or numpy ndarray that can be converted to the same dtype as that # tensor. Additionally, if the key is a placeholder, the shape of the # value will be checked for compatibility with the placeholder. # If the key is a SparseTensor, the value should be a SparseTensorValue. class GaussianNoiseLayer(Layer): """ The :class:`GaussianNoiseLayer` class is noise layer that adding noise with normal distribution to the activation. Parameters ------------ layer : a :class:`Layer` instance The `Layer` class feeding into this layer. mean : float stddev : float is_train : boolean If False, skip this layer, default is True. 
seed : int or None An integer or None to create random seed. name : a string or None An optional name to attach to this layer. """ def __init__( self, layer = None, mean = 0.0, stddev = 1.0, is_train = True, seed = None, name = 'gaussian_noise_layer', ): Layer.__init__(self, name=name) if is_train is False: print(" [TL] skip GaussianNoiseLayer") self.outputs = layer.outputs self.all_layers = list(layer.all_layers) self.all_params = list(layer.all_params) self.all_drop = dict(layer.all_drop) else: self.inputs = layer.outputs print(" [TL] GaussianNoiseLayer %s: mean:%f stddev:%f" % (self.name, mean, stddev)) with tf.variable_scope(name) as vs: # noise = np.random.normal(0.0 , sigma , tf.to_int64(self.inputs).get_shape()) noise = tf.random_normal(shape = self.inputs.get_shape(), mean=mean, stddev=stddev, seed=seed) self.outputs = self.inputs + noise self.all_layers = list(layer.all_layers) self.all_params = list(layer.all_params) self.all_drop = dict(layer.all_drop) class DropconnectDenseLayer(Layer): """ The :class:`DropconnectDenseLayer` class is ``DenseLayer`` with DropConnect behaviour which randomly remove connection between this layer to previous layer by a given keeping probability. Parameters ---------- layer : a :class:`Layer` instance The `Layer` class feeding into this layer. keep : float The keeping probability, the lower more values will be set to zero. n_units : int The number of units of the layer. act : activation function The function that is applied to the layer activations. W_init : weights initializer The initializer for initializing the weight matrix. b_init : biases initializer The initializer for initializing the bias vector. W_init_args : dictionary The arguments for the weights tf.get_variable(). b_init_args : dictionary The arguments for the biases tf.get_variable(). name : a string or None An optional name to attach to this layer. Examples -------- >>> network = tl.layers.InputLayer(x, name='input_layer') >>> network = tl.layers.DropconnectDenseLayer(network, keep = 0.8, ... n_units=800, act = tf.nn.relu, name='dropconnect_relu1') >>> network = tl.layers.DropconnectDenseLayer(network, keep = 0.5, ... n_units=800, act = tf.nn.relu, name='dropconnect_relu2') >>> network = tl.layers.DropconnectDenseLayer(network, keep = 0.5, ... n_units=10, act = tl.activation.identity, name='output_layer') References ---------- - `Wan, L. (2013). 
Regularization of neural networks using dropconnect <http://machinelearning.wustl.edu/mlpapers/papers/icml2013_wan13>`_ """ def __init__( self, layer = None, keep = 0.5, n_units = 100, act = tf.identity, W_init = tf.truncated_normal_initializer(stddev=0.1), b_init = tf.constant_initializer(value=0.0), W_init_args = {}, b_init_args = {}, name ='dropconnect_layer', ): Layer.__init__(self, name=name) self.inputs = layer.outputs if self.inputs.get_shape().ndims != 2: raise Exception("The input dimension must be rank 2") n_in = int(self.inputs.get_shape()[-1]) self.n_units = n_units print(" [TL] DropconnectDenseLayer %s: %d %s" % (self.name, self.n_units, act.__name__)) with tf.variable_scope(name) as vs: W = tf.get_variable(name='W', shape=(n_in, n_units), initializer=W_init, **W_init_args ) b = tf.get_variable(name='b', shape=(n_units), initializer=b_init, **b_init_args ) self.outputs = act(tf.matmul(self.inputs, W) + b)#, name=name) # 1.2 set_keep[name] = tf.placeholder(tf.float32) W_dropcon = tf.nn.dropout(W, set_keep[name]) self.outputs = act(tf.matmul(self.inputs, W_dropcon) + b) self.all_layers = list(layer.all_layers) self.all_params = list(layer.all_params) self.all_drop = dict(layer.all_drop) self.all_drop.update( {set_keep[name]: keep} ) self.all_layers.extend( [self.outputs] ) self.all_params.extend( [W, b] ) ## Convolutional layer (Pro) class Conv1dLayer(Layer): """ The :class:`Conv1dLayer` class is a 1D CNN layer, see `tf.nn.convolution <https://www.tensorflow.org/api_docs/python/tf/nn/convolution>`_. Parameters ---------- layer : a :class:`Layer` instance The `Layer` class feeding into this layer, [batch, in_width, in_channels]. act : activation function, None for identity. shape : list of shape shape of the filters, [filter_length, in_channels, out_channels]. stride : an int. The number of entries by which the filter is moved right at each step. dilation_rate : an int. Specifies the filter upsampling/input downsampling rate. padding : a string from: "SAME", "VALID". The type of padding algorithm to use. use_cudnn_on_gpu : An optional bool. Defaults to True. data_format : As it is 1D conv, default is 'NWC'. W_init : weights initializer The initializer for initializing the weight matrix. b_init : biases initializer or None The initializer for initializing the bias vector. If None, skip biases. W_init_args : dictionary The arguments for the weights tf.get_variable(). b_init_args : dictionary The arguments for the biases tf.get_variable(). name : a string or None An optional name to attach to this layer. 
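    Examples
    --------
    - A minimal usage sketch; the placeholder shape and layer names below are illustrative, not taken from a particular model.
    >>> x = tf.placeholder(tf.float32, shape=[None, 100, 1])   # [batch, in_width, in_channels]
    >>> network = tl.layers.InputLayer(x, name='input_layer')
    >>> network = tl.layers.Conv1dLayer(network,
    ...                     act = tf.nn.relu,
    ...                     shape = [5, 1, 32],      # [filter_length, in_channels, out_channels]
    ...                     stride = 1,
    ...                     padding = 'SAME',
    ...                     name = 'conv1d_layer1')  # output: (?, 100, 32)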
""" def __init__( self, layer = None, act = tf.identity, shape = [5, 1, 5], stride = 1, dilation_rate = 1, padding='SAME', use_cudnn_on_gpu=None, data_format='NWC', W_init = tf.truncated_normal_initializer(stddev=0.02), b_init = tf.constant_initializer(value=0.0), W_init_args = {}, b_init_args = {}, name ='cnn_layer', ): Layer.__init__(self, name=name) self.inputs = layer.outputs print(" [TL] Conv1dLayer %s: shape:%s stride:%s pad:%s act:%s" % (self.name, str(shape), str(stride), padding, act.__name__)) if act is None: act = tf.identity with tf.variable_scope(name) as vs: W = tf.get_variable(name='W_conv1d', shape=shape, initializer=W_init, **W_init_args ) self.outputs = tf.nn.convolution( self.inputs, W, strides=(stride,), padding=padding, dilation_rate=(dilation_rate,), data_format=data_format ) #1.2 if b_init: b = tf.get_variable(name='b_conv1d', shape=(shape[-1]), initializer=b_init, **b_init_args ) self.outputs = self.outputs + b self.outputs = act(self.outputs) self.all_layers = list(layer.all_layers) self.all_params = list(layer.all_params) self.all_drop = dict(layer.all_drop) self.all_layers.extend( [self.outputs] ) if b_init: self.all_params.extend( [W, b] ) else: self.all_params.extend( [W] ) class Conv2dLayer(Layer): """ The :class:`Conv2dLayer` class is a 2D CNN layer, see `tf.nn.conv2d <https://www.tensorflow.org/versions/master/api_docs/python/nn.html#conv2d>`_. Parameters ---------- layer : a :class:`Layer` instance The `Layer` class feeding into this layer. act : activation function The function that is applied to the layer activations. shape : list of shape shape of the filters, [filter_height, filter_width, in_channels, out_channels]. strides : a list of ints. The stride of the sliding window for each dimension of input.\n It Must be in the same order as the dimension specified with format. padding : a string from: "SAME", "VALID". The type of padding algorithm to use. W_init : weights initializer The initializer for initializing the weight matrix. b_init : biases initializer or None The initializer for initializing the bias vector. If None, skip biases. W_init_args : dictionary The arguments for the weights tf.get_variable(). b_init_args : dictionary The arguments for the biases tf.get_variable(). use_cudnn_on_gpu : an optional string from: "NHWC", "NCHW". Defaults to "NHWC". data_format : an optional bool. Defaults to True. name : a string or None An optional name to attach to this layer. Notes ------ - shape = [h, w, the number of output channel of previous layer, the number of output channels] - the number of output channel of a layer is its last dimension. Examples -------- >>> x = tf.placeholder(tf.float32, shape=[None, 28, 28, 1]) >>> network = tl.layers.InputLayer(x, name='input_layer') >>> network = tl.layers.Conv2dLayer(network, ... act = tf.nn.relu, ... shape = [5, 5, 1, 32], # 32 features for each 5x5 patch ... strides=[1, 1, 1, 1], ... padding='SAME', ... W_init=tf.truncated_normal_initializer(stddev=5e-2), ... W_init_args={}, ... b_init = tf.constant_initializer(value=0.0), ... b_init_args = {}, ... name ='cnn_layer1') # output: (?, 28, 28, 32) >>> network = tl.layers.PoolLayer(network, ... ksize=[1, 2, 2, 1], ... strides=[1, 2, 2, 1], ... padding='SAME', ... pool = tf.nn.max_pool, ... name ='pool_layer1',) # output: (?, 14, 14, 32) >>> Without TensorLayer, you can implement 2d convolution as follow. 
>>> W = tf.Variable(W_init(shape=[5, 5, 1, 32], ), name='W_conv') >>> b = tf.Variable(b_init(shape=[32], ), name='b_conv') >>> outputs = tf.nn.relu( tf.nn.conv2d(inputs, W, ... strides=[1, 1, 1, 1], ... padding='SAME') + b ) """ def __init__( self, layer = None, act = tf.identity, shape = [5, 5, 1, 100], strides=[1, 1, 1, 1], padding='SAME', W_init = tf.truncated_normal_initializer(stddev=0.02), b_init = tf.constant_initializer(value=0.0), W_init_args = {}, b_init_args = {}, use_cudnn_on_gpu = None, data_format = None, name ='cnn_layer', ): Layer.__init__(self, name=name) self.inputs = layer.outputs print(" [TL] Conv2dLayer %s: shape:%s strides:%s pad:%s act:%s" % (self.name, str(shape), str(strides), padding, act.__name__)) with tf.variable_scope(name) as vs: W = tf.get_variable(name='W_conv2d', shape=shape, initializer=W_init, **W_init_args ) if b_init: b = tf.get_variable(name='b_conv2d', shape=(shape[-1]), initializer=b_init, **b_init_args ) self.outputs = act( tf.nn.conv2d(self.inputs, W, strides=strides, padding=padding, use_cudnn_on_gpu=use_cudnn_on_gpu, data_format=data_format) + b ) else: self.outputs = act( tf.nn.conv2d(self.inputs, W, strides=strides, padding=padding, use_cudnn_on_gpu=use_cudnn_on_gpu, data_format=data_format)) self.all_layers = list(layer.all_layers) self.all_params = list(layer.all_params) self.all_drop = dict(layer.all_drop) self.all_layers.extend( [self.outputs] ) if b_init: self.all_params.extend( [W, b] ) else: self.all_params.extend( [W] ) class DeConv2dLayer(Layer): """ The :class:`DeConv2dLayer` class is deconvolutional 2D layer, see `tf.nn.conv2d_transpose <https://www.tensorflow.org/versions/master/api_docs/python/nn.html#conv2d_transpose>`_. Parameters ---------- layer : a :class:`Layer` instance The `Layer` class feeding into this layer. act : activation function The function that is applied to the layer activations. shape : list of shape shape of the filters, [height, width, output_channels, in_channels], filter's in_channels dimension must match that of value. output_shape : list of output shape representing the output shape of the deconvolution op. strides : a list of ints. The stride of the sliding window for each dimension of the input tensor. padding : a string from: "SAME", "VALID". The type of padding algorithm to use. W_init : weights initializer The initializer for initializing the weight matrix. b_init : biases initializer The initializer for initializing the bias vector. If None, skip biases. W_init_args : dictionary The arguments for the weights initializer. b_init_args : dictionary The arguments for the biases initializer. name : a string or None An optional name to attach to this layer. Notes ----- - shape = [h, w, the number of output channels of this layer, the number of output channel of previous layer] - output_shape = [batch_size, any, any, the number of output channels of this layer] - the number of output channel of a layer is its last dimension. Examples --------- - A part of the generator in DCGAN example >>> batch_size = 64 >>> inputs = tf.placeholder(tf.float32, [batch_size, 100], name='z_noise') >>> net_in = tl.layers.InputLayer(inputs, name='g/in') >>> net_h0 = tl.layers.DenseLayer(net_in, n_units = 8192, ... W_init = tf.random_normal_initializer(stddev=0.02), ... act = tf.identity, name='g/h0/lin') >>> print(net_h0.outputs._shape) ... 
(64, 8192) >>> net_h0 = tl.layers.ReshapeLayer(net_h0, shape = [-1, 4, 4, 512], name='g/h0/reshape') >>> net_h0 = tl.layers.BatchNormLayer(net_h0, act=tf.nn.relu, is_train=is_train, name='g/h0/batch_norm') >>> print(net_h0.outputs._shape) ... (64, 4, 4, 512) >>> net_h1 = tl.layers.DeConv2dLayer(net_h0, ... shape = [5, 5, 256, 512], ... output_shape = [batch_size, 8, 8, 256], ... strides=[1, 2, 2, 1], ... act=tf.identity, name='g/h1/decon2d') >>> net_h1 = tl.layers.BatchNormLayer(net_h1, act=tf.nn.relu, is_train=is_train, name='g/h1/batch_norm') >>> print(net_h1.outputs._shape) ... (64, 8, 8, 256) - U-Net >>> .... >>> conv10 = tl.layers.Conv2dLayer(conv9, act=tf.nn.relu, ... shape=[3,3,1024,1024], strides=[1,1,1,1], padding='SAME', ... W_init=w_init, b_init=b_init, name='conv10') >>> print(conv10.outputs) ... (batch_size, 32, 32, 1024) >>> deconv1 = tl.layers.DeConv2dLayer(conv10, act=tf.nn.relu, ... shape=[3,3,512,1024], strides=[1,2,2,1], output_shape=[batch_size,64,64,512], ... padding='SAME', W_init=w_init, b_init=b_init, name='devcon1_1') """ def __init__( self, layer = None, act = tf.identity, shape = [3, 3, 128, 256], output_shape = [1, 256, 256, 128], strides = [1, 2, 2, 1], padding = 'SAME', W_init = tf.truncated_normal_initializer(stddev=0.02), b_init = tf.constant_initializer(value=0.0), W_init_args = {}, b_init_args = {}, name ='decnn2d_layer', ): Layer.__init__(self, name=name) self.inputs = layer.outputs print(" [TL] DeConv2dLayer %s: shape:%s out_shape:%s strides:%s pad:%s act:%s" % (self.name, str(shape), str(output_shape), str(strides), padding, act.__name__)) # print(" DeConv2dLayer: Untested") with tf.variable_scope(name) as vs: W = tf.get_variable(name='W_deconv2d', shape=shape, initializer=W_init, **W_init_args ) if b_init: b = tf.get_variable(name='b_deconv2d', shape=(shape[-2]), initializer=b_init, **b_init_args ) self.outputs = act( tf.nn.conv2d_transpose(self.inputs, W, output_shape=output_shape, strides=strides, padding=padding) + b ) else: self.outputs = act( tf.nn.conv2d_transpose(self.inputs, W, output_shape=output_shape, strides=strides, padding=padding)) self.all_layers = list(layer.all_layers) self.all_params = list(layer.all_params) self.all_drop = dict(layer.all_drop) self.all_layers.extend( [self.outputs] ) if b_init: self.all_params.extend( [W, b] ) else: self.all_params.extend( [W] ) class Conv3dLayer(Layer): """ The :class:`Conv3dLayer` class is a 3D CNN layer, see `tf.nn.conv3d <https://www.tensorflow.org/versions/master/api_docs/python/nn.html#conv3d>`_. Parameters ---------- layer : a :class:`Layer` instance The `Layer` class feeding into this layer. act : activation function The function that is applied to the layer activations. shape : list of shape shape of the filters, [filter_depth, filter_height, filter_width, in_channels, out_channels]. strides : a list of ints. 1-D of length 4. The stride of the sliding window for each dimension of input. Must be in the same order as the dimension specified with format. padding : a string from: "SAME", "VALID". The type of padding algorithm to use. W_init : weights initializer The initializer for initializing the weight matrix. b_init : biases initializer The initializer for initializing the bias vector. W_init_args : dictionary The arguments for the weights initializer. b_init_args : dictionary The arguments for the biases initializer. name : a string or None An optional name to attach to this layer. 
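    Examples
    --------
    - A minimal usage sketch; the input shape below is illustrative.
    >>> x = tf.placeholder(tf.float32, shape=[None, 16, 32, 32, 3])   # [batch, depth, height, width, channels]
    >>> network = tl.layers.InputLayer(x, name='input_layer')
    >>> network = tl.layers.Conv3dLayer(network,
    ...                     act = tf.nn.relu,
    ...                     shape = [2, 2, 2, 3, 32],     # 32 filters of size 2x2x2
    ...                     strides = [1, 2, 2, 2, 1],
    ...                     padding = 'SAME',
    ...                     name = 'cnn3d_layer1')        # output: (?, 8, 16, 16, 32)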
""" def __init__( self, layer = None, act = tf.identity, shape = [2, 2, 2, 64, 128], strides=[1, 2, 2, 2, 1], padding='SAME', W_init = tf.truncated_normal_initializer(stddev=0.02), b_init = tf.constant_initializer(value=0.0), W_init_args = {}, b_init_args = {}, name ='cnn3d_layer', ): Layer.__init__(self, name=name) self.inputs = layer.outputs print(" [TL] Conv3dLayer %s: shape:%s strides:%s pad:%s act:%s" % (self.name, str(shape), str(strides), padding, act.__name__)) with tf.variable_scope(name) as vs: # W = tf.Variable(W_init(shape=shape, **W_init_args), name='W_conv') # b = tf.Variable(b_init(shape=[shape[-1]], **b_init_args), name='b_conv') W = tf.get_variable(name='W_conv3d', shape=shape, initializer=W_init, **W_init_args ) b = tf.get_variable(name='b_conv3d', shape=(shape[-1]), initializer=b_init, **b_init_args ) self.outputs = act( tf.nn.conv3d(self.inputs, W, strides=strides, padding=padding, name=None) + b ) # self.outputs = act( tf.nn.conv3d(self.inputs, W, strides=strides, padding=padding, name=None) + b ) self.all_layers = list(layer.all_layers) self.all_params = list(layer.all_params) self.all_drop = dict(layer.all_drop) self.all_layers.extend( [self.outputs] ) self.all_params.extend( [W, b] ) class DeConv3dLayer(Layer): """The :class:`DeConv3dLayer` class is deconvolutional 3D layer, see `tf.nn.conv3d_transpose <https://www.tensorflow.org/versions/master/api_docs/python/nn.html#conv3d_transpose>`_. Parameters ---------- layer : a :class:`Layer` instance The `Layer` class feeding into this layer. act : activation function The function that is applied to the layer activations. shape : list of shape shape of the filters, [depth, height, width, output_channels, in_channels], filter's in_channels dimension must match that of value. output_shape : list of output shape representing the output shape of the deconvolution op. strides : a list of ints. The stride of the sliding window for each dimension of the input tensor. padding : a string from: "SAME", "VALID". The type of padding algorithm to use. W_init : weights initializer The initializer for initializing the weight matrix. b_init : biases initializer The initializer for initializing the bias vector. W_init_args : dictionary The arguments for the weights initializer. b_init_args : dictionary The arguments for the biases initializer. name : a string or None An optional name to attach to this layer. 
""" def __init__( self, layer = None, act = tf.identity, shape = [2, 2, 2, 128, 256], output_shape = [1, 12, 32, 32, 128], strides = [1, 2, 2, 2, 1], padding = 'SAME', W_init = tf.truncated_normal_initializer(stddev=0.02), b_init = tf.constant_initializer(value=0.0), W_init_args = {}, b_init_args = {}, name ='decnn3d_layer', ): Layer.__init__(self, name=name) self.inputs = layer.outputs print(" [TL] DeConv3dLayer %s: shape:%s out_shape:%s strides:%s pad:%s act:%s" % (self.name, str(shape), str(output_shape), str(strides), padding, act.__name__)) with tf.variable_scope(name) as vs: W = tf.get_variable(name='W_deconv3d', shape=shape, initializer=W_init, **W_init_args ) b = tf.get_variable(name='b_deconv3d', shape=(shape[-2]), initializer=b_init, **b_init_args ) self.outputs = act( tf.nn.conv3d_transpose(self.inputs, W, output_shape=output_shape, strides=strides, padding=padding) + b ) self.all_layers = list(layer.all_layers) self.all_params = list(layer.all_params) self.all_drop = dict(layer.all_drop) self.all_layers.extend( [self.outputs] ) self.all_params.extend( [W, b] ) class UpSampling2dLayer(Layer): """The :class:`UpSampling2dLayer` class is upSampling 2d layer, see `tf.image.resize_images <https://www.tensorflow.org/versions/master/api_docs/python/image/resizing#resize_images>`_. Parameters ----------- layer : a layer class with 4-D Tensor of shape [batch, height, width, channels] or 3-D Tensor of shape [height, width, channels]. size : a tuple of int or float. (height, width) scale factor or new size of height and width. is_scale : boolean, if True (default), size is scale factor, otherwise, size is number of pixels of height and width. method : 0, 1, 2, 3. ResizeMethod. Defaults to ResizeMethod.BILINEAR. - ResizeMethod.BILINEAR, Bilinear interpolation. - ResizeMethod.NEAREST_NEIGHBOR, Nearest neighbor interpolation. - ResizeMethod.BICUBIC, Bicubic interpolation. - ResizeMethod.AREA, Area interpolation. align_corners : bool. If true, exactly align all 4 corners of the input and output. Defaults to false. name : a string or None An optional name to attach to this layer. 
""" def __init__( self, layer = None, size = [], is_scale = True, method = 0, align_corners = False, name ='upsample2d_layer', ): Layer.__init__(self, name=name) self.inputs = layer.outputs if len(self.inputs.get_shape()) == 3: if is_scale: size_h = size[0] * int(self.inputs.get_shape()[0]) size_w = size[1] * int(self.inputs.get_shape()[1]) size = [int(size_h), int(size_w)] elif len(self.inputs.get_shape()) == 4: if is_scale: size_h = size[0] * int(self.inputs.get_shape()[1]) size_w = size[1] * int(self.inputs.get_shape()[2]) size = [int(size_h), int(size_w)] else: raise Exception("Donot support shape %s" % self.inputs.get_shape()) print(" [TL] UpSampling2dLayer %s: is_scale:%s size:%s method:%d align_corners:%s" % (name, is_scale, size, method, align_corners)) with tf.variable_scope(name) as vs: try: self.outputs = tf.image.resize_images(self.inputs, size=size, method=method, align_corners=align_corners) except: # for TF 0.10 self.outputs = tf.image.resize_images(self.inputs, new_height=size[0], new_width=size[1], method=method, align_corners=align_corners) self.all_layers = list(layer.all_layers) self.all_params = list(layer.all_params) self.all_drop = dict(layer.all_drop) self.all_layers.extend( [self.outputs] ) class DownSampling2dLayer(Layer): """The :class:`DownSampling2dLayer` class is downSampling 2d layer, see `tf.image.resize_images <https://www.tensorflow.org/versions/master/api_docs/python/image/resizing#resize_images>`_. Parameters ----------- layer : a layer class with 4-D Tensor of shape [batch, height, width, channels] or 3-D Tensor of shape [height, width, channels]. size : a tupe of int or float. (height, width) scale factor or new size of height and width. is_scale : boolean, if True (default), size is scale factor, otherwise, size is number of pixels of height and width. method : 0, 1, 2, 3. ResizeMethod. Defaults to ResizeMethod.BILINEAR. - ResizeMethod.BILINEAR, Bilinear interpolation. - ResizeMethod.NEAREST_NEIGHBOR, Nearest neighbor interpolation. - ResizeMethod.BICUBIC, Bicubic interpolation. - ResizeMethod.AREA, Area interpolation. align_corners : bool. If true, exactly align all 4 corners of the input and output. Defaults to false. name : a string or None An optional name to attach to this layer. 
""" def __init__( self, layer = None, size = [], is_scale = True, method = 0, align_corners = False, name ='downsample2d_layer', ): Layer.__init__(self, name=name) self.inputs = layer.outputs if len(self.inputs.get_shape()) == 3: if is_scale: size_h = size[0] * int(self.inputs.get_shape()[0]) size_w = size[1] * int(self.inputs.get_shape()[1]) size = [int(size_h), int(size_w)] elif len(self.inputs.get_shape()) == 4: if is_scale: size_h = size[0] * int(self.inputs.get_shape()[1]) size_w = size[1] * int(self.inputs.get_shape()[2]) size = [int(size_h), int(size_w)] else: raise Exception("Donot support shape %s" % self.inputs.get_shape()) print(" [TL] DownSampling2dLayer %s: is_scale:%s size:%s method:%d, align_corners:%s" % (name, is_scale, size, method, align_corners)) with tf.variable_scope(name) as vs: try: self.outputs = tf.image.resize_images(self.inputs, size=size, method=method, align_corners=align_corners) except: # for TF 0.10 self.outputs = tf.image.resize_images(self.inputs, new_height=size[0], new_width=size[1], method=method, align_corners=align_corners) self.all_layers = list(layer.all_layers) self.all_params = list(layer.all_params) self.all_drop = dict(layer.all_drop) self.all_layers.extend( [self.outputs] ) def AtrousConv1dLayer(net, n_filter=32, filter_size=2, stride=1, dilation=1, act=None, padding='SAME', use_cudnn_on_gpu=None,data_format='NWC', W_init = tf.truncated_normal_initializer(stddev=0.02), b_init = tf.constant_initializer(value=0.0), W_init_args = {}, b_init_args = {},name ='conv1d',): """Wrapper for :class:`AtrousConv1dLayer`, if you don't understand how to use :class:`Conv1dLayer`, this function may be easier. Parameters ---------- net : TensorLayer layer. n_filter : number of filter. filter_size : an int. stride : an int. dilation : an int, filter dilation size. act : None or activation function. others : see :class:`Conv1dLayer`. """ if act is None: act = tf.identity net = Conv1dLayer(layer = net, act = act, shape = [filter_size, int(net.outputs.get_shape()[-1]), n_filter], stride = stride, padding = padding, dilation_rate = dilation, use_cudnn_on_gpu = use_cudnn_on_gpu, data_format = data_format, W_init = W_init, b_init = b_init, W_init_args = W_init_args, b_init_args = b_init_args, name = name, ) return net class AtrousConv2dLayer(Layer): """The :class:`AtrousConv2dLayer` class is Atrous convolution (a.k.a. convolution with holes or dilated convolution) 2D layer, see `tf.nn.atrous_conv2d <https://www.tensorflow.org/versions/master/api_docs/python/nn.html#atrous_conv2d>`_. Parameters ----------- layer : a layer class with 4-D Tensor of shape [batch, height, width, channels]. filters : A 4-D Tensor with the same type as value and shape [filter_height, filter_width, in_channels, out_channels]. filters' in_channels dimension must match that of value. Atrous convolution is equivalent to standard convolution with upsampled filters with effective height filter_height + (filter_height - 1) * (rate - 1) and effective width filter_width + (filter_width - 1) * (rate - 1), produced by inserting rate - 1 zeros along consecutive elements across the filters' spatial dimensions. n_filter : number of filter. filter_size : tuple (height, width) for filter size. rate : A positive int32. The stride with which we sample input values across the height and width dimensions. Equivalently, the rate by which we upsample the filter values by inserting zeros across the height and width dimensions. In the literature, the same parameter is sometimes called input stride or dilation. 
act : activation function, None for linear. padding : A string, either 'VALID' or 'SAME'. The padding algorithm. W_init : weights initializer. The initializer for initializing the weight matrix. b_init : biases initializer or None. The initializer for initializing the bias vector. If None, skip biases. W_init_args : dictionary. The arguments for the weights tf.get_variable(). b_init_args : dictionary. The arguments for the biases tf.get_variable(). name : a string or None, an optional name to attach to this layer. """ def __init__( self, layer = None, n_filter = 32, filter_size = (3,3), rate = 2, act = None, padding = 'SAME', W_init = tf.truncated_normal_initializer(stddev=0.02), b_init = tf.constant_initializer(value=0.0), W_init_args = {}, b_init_args = {}, name = 'atrou2d' ): Layer.__init__(self, name=name) self.inputs = layer.outputs print(" [TL] AtrousConv2dLayer %s: n_filter:%d filter_size:%s rate:%d pad:%s act:%s" % (self.name, n_filter, filter_size, rate, padding, act.__name__)) if act is None: act = tf.identity with tf.variable_scope(name) as vs: shape = [filter_size[0], filter_size[1], int(self.inputs.get_shape()[-1]), n_filter] filters = tf.get_variable(name='filter', shape=shape, initializer=W_init, **W_init_args ) if b_init: b = tf.get_variable(name='b', shape=(n_filter), initializer=b_init, **b_init_args ) self.outputs = act(tf.nn.atrous_conv2d(self.inputs, filters, rate, padding) + b) else: self.outputs = act(tf.nn.atrous_conv2d(self.inputs, filters, rate, padding)) self.all_layers = list(layer.all_layers) self.all_params = list(layer.all_params) self.all_drop = dict(layer.all_drop) self.all_layers.extend( [self.outputs] ) if b_init: self.all_params.extend( [filters, b] ) else: self.all_params.extend( [filters] ) class SeparableConv2dLayer(Layer):# Untested """The :class:`SeparableConv2dLayer` class is 2-D convolution with separable filters, see `tf.layers.separable_conv2d <https://www.tensorflow.org/api_docs/python/tf/layers/separable_conv2d>`_. Parameters ----------- layer : a layer class filters : integer, the dimensionality of the output space (i.e. the number output of filters in the convolution). kernel_size : a tuple or list of N positive integers specifying the spatial dimensions of of the filters. Can be a single integer to specify the same value for all spatial dimensions. strides : a tuple or list of N positive integers specifying the strides of the convolution. Can be a single integer to specify the same value for all spatial dimensions. Specifying any stride value != 1 is incompatible with specifying any dilation_rate value != 1. padding : one of "valid" or "same" (case-insensitive). data_format : A string, one of channels_last (default) or channels_first. The ordering of the dimensions in the inputs. channels_last corresponds to inputs with shapedata_format = 'NWHC' (batch, width, height, channels) while channels_first corresponds to inputs with shape (batch, channels, width, height). dilation_rate : an integer or tuple/list of 2 integers, specifying the dilation rate to use for dilated convolution. Can be a single integer to specify the same value for all spatial dimensions. Currently, specifying any dilation_rate value != 1 is incompatible with specifying any stride value != 1. depth_multiplier : The number of depthwise convolution output channels for each input channel. The total number of depthwise convolution output channels will be equal to num_filters_in * depth_multiplier. act (activation) : Activation function. 
Set it to None to maintain a linear activation. use_bias : Boolean, whether the layer uses a bias. depthwise_initializer : An initializer for the depthwise convolution kernel. pointwise_initializer : An initializer for the pointwise convolution kernel. bias_initializer : An initializer for the bias vector. If None, no bias will be applied. depthwise_regularizer : Optional regularizer for the depthwise convolution kernel. pointwise_regularizer : Optional regularizer for the pointwise convolution kernel. bias_regularizer : Optional regularizer for the bias vector. activity_regularizer : Regularizer function for the output. name : a string or None, an optional name to attach to this layer. """ def __init__( self, layer = None, filters = None, kernel_size=5, strides=(1, 1), padding='valid', data_format='channels_last', dilation_rate=(1, 1), depth_multiplier=1, act=None, use_bias=True, depthwise_initializer=None, pointwise_initializer=None, bias_initializer=tf.zeros_initializer, depthwise_regularizer=None, pointwise_regularizer=None, bias_regularizer=None, activity_regularizer=None, name = 'atrou2d' ): Layer.__init__(self, name=name) self.inputs = layer.outputs assert filters is not None assert tf.__version__ > "0.12.1", "This layer only supports for TF 1.0+" if act is None: act = tf.identity bias_initializer = bias_initializer() print(" [TL] SeparableConv2dLayer %s: filters:%s kernel_size:%s strides:%s padding:%s dilation_rate:%s depth_multiplier:%s act:%s" % (self.name, str(filters), str(kernel_size), str(strides), padding, str(dilation_rate), str(depth_multiplier), act.__name__)) with tf.variable_scope(name) as vs: self.outputs = tf.layers.separable_conv2d(self.inputs, filters, kernel_size, strides=strides, padding=padding, data_format=data_format, dilation_rate=dilation_rate, depth_multiplier=depth_multiplier, activation=act, use_bias=use_bias, depthwise_initializer=depthwise_initializer, pointwise_initializer=pointwise_initializer, bias_initializer=bias_initializer, depthwise_regularizer=depthwise_regularizer, pointwise_regularizer=pointwise_regularizer, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer,) #trainable=True, name=None, reuse=None) variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name) self.all_layers = list(layer.all_layers) self.all_params = list(layer.all_params) self.all_drop = dict(layer.all_drop) self.all_layers.extend( [self.outputs] ) self.all_params.extend( variables ) ## Initializers for Convuolutional Layers def deconv2d_bilinear_upsampling_initializer(shape): """Returns initializer that can be passed to DeConv2dLayer to initalize the weights to correspond to channel wise bilinear upsampling. 
Used in some segmantic segmentation approches such as [FCN](https://arxiv.org/abs/1605.06211) Parameters ---------- shape : list of shape shape of the filters, [height, width, output_channels, in_channels], must match that passed to DeConv2dLayer Returns ---------- tf.constant_initializer with weights set to correspond to per channel bilinear upsampling when passed as W_int in DeConv2dLayer Examples -------- >>> rescale_factor = 2 #upsampling by a factor of 2, ie e.g 100->200 >>> filter_size = (2 * rescale_factor - rescale_factor % 2) #Corresponding bilinear filter size >>> num_in_channels = 3 >>> num_out_channels = 3 >>> deconv_filter_shape = [filter_size, filter_size, num_out_channels, num_in_channels] >>> x = tf.placeholder(tf.float32, [1, imsize, imsize, num_channels]) >>> network = tl.layers.InputLayer(x, name='input_layer') >>> bilinear_init = deconv2d_bilinear_upsampling_initializer(shape=filter_shape) >>> network = tl.layers.DeConv2dLayer(network, shape = filter_shape, output_shape = [1, imsize*rescale_factor, imsize*rescale_factor, num_out_channels], strides=[1, rescale_factor, rescale_factor, 1], W_init=bilinear_init, padding='SAME', act=tf.identity, name='g/h1/decon2d') """ if shape[0] != shape[1]: raise Exception('deconv2d_bilinear_upsampling_initializer only supports symmetrical filter sizes') if shape[3] < shape [2]: raise Exception('deconv2d_bilinear_upsampling_initializer behaviour is not defined for num_in_channels < num_out_channels ') filter_size = shape[0] num_out_channels = shape[2] num_in_channels = shape[3] #Create bilinear filter kernel as numpy array bilinear_kernel = np.zeros([filter_size, filter_size], dtype=np.float32) scale_factor = (filter_size + 1) // 2 if filter_size % 2 == 1: center = scale_factor - 1 else: center = scale_factor - 0.5 for x in range(filter_size): for y in range(filter_size): bilinear_kernel[x,y] = (1 - abs(x - center) / scale_factor) * \ (1 - abs(y - center) / scale_factor) weights = np.zeros((filter_size, filter_size, num_out_channels, num_in_channels)) for i in range(num_out_channels): weights[:, :, i, i] = bilinear_kernel #assign numpy array to constant_initalizer and pass to get_variable bilinear_weights_init = tf.constant_initializer(value=weights, dtype=tf.float32) return bilinear_weights_init ## Convolutional layer (Simplified) def Conv1d(net, n_filter=32, filter_size=5, stride=1, dilation_rate=1, act=None, padding='SAME', use_cudnn_on_gpu=None, data_format="NWC", W_init = tf.truncated_normal_initializer(stddev=0.02), b_init = tf.constant_initializer(value=0.0), W_init_args = {}, b_init_args = {}, name ='conv1d',): """Wrapper for :class:`Conv1dLayer`, if you don't understand how to use :class:`Conv1dLayer`, this function may be easier. Parameters ---------- net : TensorLayer layer. n_filter : number of filter. filter_size : an int. stride : an int. dilation_rate : As it is 1D conv, the default is "NWC". act : None or activation function. others : see :class:`Conv1dLayer`. 
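    Examples
    --------
    - A minimal usage sketch; the input shape and names are illustrative.
    >>> x = tf.placeholder(tf.float32, shape=[None, 100, 1])   # (batch, width, channels)
    >>> net = InputLayer(x, name='in')
    >>> net = Conv1d(net, n_filter=32, filter_size=5, stride=1,
    ...              act=tf.nn.relu, padding='SAME', name='conv1d_1')   # output: (?, 100, 32)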
""" if act is None: act = tf.identity net = Conv1dLayer(layer = net, act = act, shape = [filter_size, int(net.outputs.get_shape()[-1]), n_filter], stride = stride, dilation_rate = dilation_rate, padding = padding, use_cudnn_on_gpu = use_cudnn_on_gpu, data_format = data_format, W_init = W_init, b_init = b_init, W_init_args = W_init_args, b_init_args = b_init_args, name = name, ) return net def Conv2d(net, n_filter=32, filter_size=(3, 3), strides=(1, 1), act = None, padding='SAME', W_init = tf.truncated_normal_initializer(stddev=0.02), b_init = tf.constant_initializer(value=0.0), W_init_args = {}, b_init_args = {}, use_cudnn_on_gpu = None, data_format = None,name ='conv2d',): """Wrapper for :class:`Conv2dLayer`, if you don't understand how to use :class:`Conv2dLayer`, this function may be easier. Parameters ---------- net : TensorLayer layer. n_filter : number of filter. filter_size : tuple (height, width) for filter size. strides : tuple (height, width) for strides. act : None or activation function. others : see :class:`Conv2dLayer`. Examples -------- >>> w_init = tf.truncated_normal_initializer(stddev=0.01) >>> b_init = tf.constant_initializer(value=0.0) >>> inputs = InputLayer(x, name='inputs') >>> conv1 = Conv2d(inputs, 64, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv1_1') >>> conv1 = Conv2d(conv1, 64, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv1_2') >>> pool1 = MaxPool2d(conv1, (2, 2), padding='SAME', name='pool1') >>> conv2 = Conv2d(pool1, 128, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv2_1') >>> conv2 = Conv2d(conv2, 128, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv2_2') >>> pool2 = MaxPool2d(conv2, (2, 2), padding='SAME', name='pool2') """ assert len(strides) == 2, "len(strides) should be 2, Conv2d and Conv2dLayer are different." if act is None: act = tf.identity try: pre_channel = int(net.outputs.get_shape()[-1]) except: # if pre_channel is ?, it happens when using Spatial Transformer Net pre_channel = 1 print("[warnings] unknow input channels, set to 1") net = Conv2dLayer(net, act = act, shape = [filter_size[0], filter_size[1], pre_channel, n_filter], # 32 features for each 5x5 patch strides = [1, strides[0], strides[1], 1], padding = padding, W_init = W_init, W_init_args = W_init_args, b_init = b_init, b_init_args = b_init_args, use_cudnn_on_gpu = use_cudnn_on_gpu, data_format = data_format, name = name) return net def DeConv2d(net, n_out_channel = 32, filter_size=(3, 3), out_size = (30, 30), strides = (2, 2), padding = 'SAME', batch_size = None, act = None, W_init = tf.truncated_normal_initializer(stddev=0.02), b_init = tf.constant_initializer(value=0.0), W_init_args = {}, b_init_args = {}, name ='decnn2d'): """Wrapper for :class:`DeConv2dLayer`, if you don't understand how to use :class:`DeConv2dLayer`, this function may be easier. Parameters ---------- net : TensorLayer layer. n_out_channel : int, number of output channel. filter_size : tuple of (height, width) for filter size. out_size : tuple of (height, width) of output. batch_size : int or None, batch_size. If None, try to find the batch_size from the first dim of net.outputs (you should tell the batch_size when define the input placeholder). strides : tuple of (height, width) for strides. act : None or activation function. others : see :class:`DeConv2dLayer`. """ assert len(strides) == 2, "len(strides) should be 2, DeConv2d and DeConv2dLayer are different." 
if act is None: act = tf.identity # if batch_size is None: # batch_size = tf.shape(net.outputs)[0] fixed_batch_size = net.outputs.get_shape().with_rank_at_least(1)[0] if fixed_batch_size.value: batch_size = fixed_batch_size.value else: from tensorflow.python.ops import array_ops batch_size = array_ops.shape(net.outputs)[0] net = DeConv2dLayer(layer = net, act = act, shape = [filter_size[0], filter_size[1], n_out_channel, int(net.outputs.get_shape()[-1])], output_shape = [batch_size, int(out_size[0]), int(out_size[1]), n_out_channel], strides = [1, strides[0], strides[1], 1], padding = padding, W_init = W_init, b_init = b_init, W_init_args = W_init_args, b_init_args = b_init_args, name = name) return net def MaxPool1d(net, filter_size, strides, padding='valid', data_format='channels_last', name=None): #Untested """Wrapper for `tf.layers.max_pooling1d <https://www.tensorflow.org/api_docs/python/tf/layers/max_pooling1d>`_ . Parameters ------------ net : TensorLayer layer, the tensor over which to pool. Must have rank 3. filter_size (pool_size) : An integer or tuple/list of a single integer, representing the size of the pooling window. strides : An integer or tuple/list of a single integer, specifying the strides of the pooling operation. padding : A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format : A string, one of channels_last (default) or channels_first. The ordering of the dimensions in the inputs. channels_last corresponds to inputs with shape (batch, length, channels) while channels_first corresponds to inputs with shape (batch, channels, length). name : A string, the name of the layer. Returns -------- - A :class:`Layer` which the output tensor, of rank 3. """ print(" [TL] MaxPool1d %s: filter_size:%s strides:%s padding:%s" % (name, str(filter_size), str(strides), str(padding))) outputs = tf.layers.max_pooling1d(net.outputs, filter_size, strides, padding=padding, data_format=data_format, name=name) net_new = copy.copy(net) net_new.outputs = outputs net_new.all_layers.extend( [outputs] ) return net_new def MeanPool1d(net, filter_size, strides, padding='valid', data_format='channels_last', name=None): #Untested """Wrapper for `tf.layers.average_pooling1d <https://www.tensorflow.org/api_docs/python/tf/layers/average_pooling1d>`_ . Parameters ------------ net : TensorLayer layer, the tensor over which to pool. Must have rank 3. filter_size (pool_size) : An integer or tuple/list of a single integer, representing the size of the pooling window. strides : An integer or tuple/list of a single integer, specifying the strides of the pooling operation. padding : A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format : A string, one of channels_last (default) or channels_first. The ordering of the dimensions in the inputs. channels_last corresponds to inputs with shape (batch, length, channels) while channels_first corresponds to inputs with shape (batch, channels, length). name : A string, the name of the layer. Returns -------- - A :class:`Layer` which the output tensor, of rank 3. 
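    Examples
    --------
    - A minimal usage sketch (untested, simply matching the wrapper's signature); the input shape is illustrative.
    >>> x = tf.placeholder(tf.float32, shape=[None, 100, 32])   # (batch, length, channels)
    >>> net = InputLayer(x, name='in')
    >>> net = MeanPool1d(net, filter_size=3, strides=2, padding='same', name='meanpool1d_1')   # output: (?, 50, 32)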
""" print(" [TL] MeanPool1d %s: filter_size:%s strides:%s padding:%s" % (name, str(filter_size), str(strides), str(padding))) outputs = tf.layers.average_pooling1d(net.outputs, filter_size, strides, padding=padding, data_format=data_format, name=name) net_new = copy.copy(net) net_new.outputs = outputs net_new.all_layers.extend( [outputs] ) return net_new def MaxPool2d(net, filter_size=(2, 2), strides=None, padding='SAME', name='maxpool'): """Wrapper for :class:`PoolLayer`. Parameters ----------- net : TensorLayer layer. filter_size : tuple of (height, width) for filter size. strides : tuple of (height, width). Default is the same with filter_size. others : see :class:`PoolLayer`. """ if strides is None: strides = filter_size assert len(strides) == 2, "len(strides) should be 2, MaxPool2d and PoolLayer are different." net = PoolLayer(net, ksize=[1, filter_size[0], filter_size[1], 1], strides=[1, strides[0], strides[1], 1], padding=padding, pool = tf.nn.max_pool, name = name) return net def MeanPool2d(net, filter_size=(2, 2), strides=None, padding='SAME', name='meanpool'): """Wrapper for :class:`PoolLayer`. Parameters ----------- net : TensorLayer layer. filter_size : tuple of (height, width) for filter size. strides : tuple of (height, width). Default is the same with filter_size. others : see :class:`PoolLayer`. """ if strides is None: strides = filter_size assert len(strides) == 2, "len(strides) should be 2, MeanPool2d and PoolLayer are different." net = PoolLayer(net, ksize=[1, filter_size[0], filter_size[1], 1], strides=[1, strides[0], strides[1], 1], padding=padding, pool = tf.nn.avg_pool, name = name) return net def MaxPool3d(net, filter_size, strides, padding='valid', data_format='channels_last', name=None): #Untested """Wrapper for `tf.layers.max_pooling3d <https://www.tensorflow.org/api_docs/python/tf/layers/max_pooling3d>`_ . Parameters ------------ net : TensorLayer layer, the tensor over which to pool. Must have rank 5. filter_size (pool_size) : An integer or tuple/list of 3 integers: (pool_depth, pool_height, pool_width) specifying the size of the pooling window. Can be a single integer to specify the same value for all spatial dimensions. strides : An integer or tuple/list of 3 integers, specifying the strides of the pooling operation. Can be a single integer to specify the same value for all spatial dimensions. padding : A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format : A string. The ordering of the dimensions in the inputs. channels_last (default) and channels_first are supported. channels_last corresponds to inputs with shape (batch, depth, height, width, channels) while channels_first corresponds to inputs with shape (batch, channels, depth, height, width). name : A string, the name of the layer. """ print(" [TL] MaxPool3d %s: filter_size:%s strides:%s padding:%s" % (name, str(filter_size), str(strides), str(padding))) outputs = tf.layers.max_pooling3d(net.outputs, filter_size, strides, padding=padding, data_format=data_format, name=name) net_new = copy.copy(net) net_new.outputs = outputs net_new.all_layers.extend( [outputs] ) return net_new def MeanPool3d(net, filter_size, strides, padding='valid', data_format='channels_last', name=None): #Untested """Wrapper for `tf.layers.average_pooling3d <https://www.tensorflow.org/api_docs/python/tf/layers/average_pooling3d>`_ Parameters ------------ net : TensorLayer layer, the tensor over which to pool. Must have rank 5. 
filter_size (pool_size) : An integer or tuple/list of 3 integers: (pool_depth, pool_height, pool_width) specifying the size of the pooling window. Can be a single integer to specify the same value for all spatial dimensions. strides : An integer or tuple/list of 3 integers, specifying the strides of the pooling operation. Can be a single integer to specify the same value for all spatial dimensions. padding : A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format : A string. The ordering of the dimensions in the inputs. channels_last (default) and channels_first are supported. channels_last corresponds to inputs with shape (batch, depth, height, width, channels) while channels_first corresponds to inputs with shape (batch, channels, depth, height, width). name : A string, the name of the layer. """ print(" [TL] MeanPool3d %s: filter_size:%s strides:%s padding:%s" % (name, str(filter_size), str(strides), str(padding))) outputs = tf.layers.average_pooling3d(net.outputs, filter_size, strides, padding=padding, data_format=data_format, name=name) net_new = copy.copy(net) net_new.outputs = outputs net_new.all_layers.extend( [outputs] ) return net_new ## Super resolution def SubpixelConv2d(net, scale=2, n_out_channel=None, act=tf.identity, name='subpixel_conv2d'): """The :class:`SubpixelConv2d` class is a sub-pixel 2d convolutional ayer, usually be used for Super-Resolution applications, `example code <https://github.com/zsdonghao/SRGAN/>`_. Parameters ------------ net : TensorLayer layer. scale : int, upscaling ratio, a wrong setting will lead to Dimension size error. n_out_channel : int or None, the number of output channels. Note that, the number of input channels == (scale x scale) x The number of output channels. If None, automatically set n_out_channel == the number of input channels / (scale x scale). act : activation function. name : string. An optional name to attach to this layer. Examples --------- >>> # examples here just want to tell you how to set the n_out_channel. >>> x = np.random.rand(2, 16, 16, 4) >>> X = tf.placeholder("float32", shape=(2, 16, 16, 4), name="X") >>> net = InputLayer(X, name='input') >>> net = SubpixelConv2d(net, scale=2, n_out_channel=1, name='subpixel_conv2d') >>> y = sess.run(net.outputs, feed_dict={X: x}) >>> print(x.shape, y.shape) ... (2, 16, 16, 4) (2, 32, 32, 1) >>> >>> x = np.random.rand(2, 16, 16, 4*10) >>> X = tf.placeholder("float32", shape=(2, 16, 16, 4*10), name="X") >>> net = InputLayer(X, name='input2') >>> net = SubpixelConv2d(net, scale=2, n_out_channel=10, name='subpixel_conv2d2') >>> y = sess.run(net.outputs, feed_dict={X: x}) >>> print(x.shape, y.shape) ... (2, 16, 16, 40) (2, 32, 32, 10) >>> >>> x = np.random.rand(2, 16, 16, 25*10) >>> X = tf.placeholder("float32", shape=(2, 16, 16, 25*10), name="X") >>> net = InputLayer(X, name='input3') >>> net = SubpixelConv2d(net, scale=5, n_out_channel=None, name='subpixel_conv2d3') >>> y = sess.run(net.outputs, feed_dict={X: x}) >>> print(x.shape, y.shape) ... 
(2, 16, 16, 250) (2, 80, 80, 10) References ------------ - `Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional Neural Network <https://arxiv.org/pdf/1609.05158.pdf>`_ """ # github/Tetrachrome/subpixel https://github.com/Tetrachrome/subpixel/blob/master/subpixel.py _err_log = "SubpixelConv2d: The number of input channels == (scale x scale) x The number of output channels" scope_name = tf.get_variable_scope().name if scope_name: name = scope_name + '/' + name def _PS(X, r, n_out_channel): if n_out_channel >= 1: assert int(X.get_shape()[-1]) == (r ** 2) * n_out_channel, _err_log bsize, a, b, c = X.get_shape().as_list() bsize = tf.shape(X)[0] # Handling Dimension(None) type for undefined batch dim Xs=tf.split(X,r,3) #b*h*w*r*r Xr=tf.concat(Xs,2) #b*h*(r*w)*r X=tf.reshape(Xr,(bsize,r*a,r*b,n_out_channel)) # b*(r*h)*(r*w)*c else: print(_err_log) return X inputs = net.outputs if n_out_channel is None: assert int(inputs.get_shape()[-1])/ (scale ** 2) % 1 == 0, _err_log n_out_channel = int(int(inputs.get_shape()[-1])/ (scale ** 2)) print(" [TL] SubpixelConv2d %s: scale: %d n_out_channel: %s act: %s" % (name, scale, n_out_channel, act.__name__)) net_new = Layer(inputs, name=name) # with tf.name_scope(name): with tf.variable_scope(name) as vs: net_new.outputs = act(_PS(inputs, r=scale, n_out_channel=n_out_channel)) net_new.all_layers = list(net.all_layers) net_new.all_params = list(net.all_params) net_new.all_drop = dict(net.all_drop) net_new.all_layers.extend( [net_new.outputs] ) return net_new def SubpixelConv2d_old(net, scale=2, n_out_channel=None, act=tf.identity, name='subpixel_conv2d'): """The :class:`SubpixelConv2d` class is a sub-pixel 2d convolutional ayer, usually be used for Super-Resolution applications, `example code <https://github.com/zsdonghao/SRGAN/>`_. Parameters ------------ net : TensorLayer layer. scale : int, upscaling ratio, a wrong setting will lead to Dimension size error. n_out_channel : int or None, the number of output channels. Note that, the number of input channels == (scale x scale) x The number of output channels. If None, automatically set n_out_channel == the number of input channels / (scale x scale). act : activation function. name : string. An optional name to attach to this layer. Examples --------- >>> # examples here just want to tell you how to set the n_out_channel. >>> x = np.random.rand(2, 16, 16, 4) >>> X = tf.placeholder("float32", shape=(2, 16, 16, 4), name="X") >>> net = InputLayer(X, name='input') >>> net = SubpixelConv2d(net, scale=2, n_out_channel=1, name='subpixel_conv2d') >>> y = sess.run(net.outputs, feed_dict={X: x}) >>> print(x.shape, y.shape) ... (2, 16, 16, 4) (2, 32, 32, 1) >>> >>> x = np.random.rand(2, 16, 16, 4*10) >>> X = tf.placeholder("float32", shape=(2, 16, 16, 4*10), name="X") >>> net = InputLayer(X, name='input2') >>> net = SubpixelConv2d(net, scale=2, n_out_channel=10, name='subpixel_conv2d2') >>> y = sess.run(net.outputs, feed_dict={X: x}) >>> print(x.shape, y.shape) ... (2, 16, 16, 40) (2, 32, 32, 10) >>> >>> x = np.random.rand(2, 16, 16, 25*10) >>> X = tf.placeholder("float32", shape=(2, 16, 16, 25*10), name="X") >>> net = InputLayer(X, name='input3') >>> net = SubpixelConv2d(net, scale=5, n_out_channel=None, name='subpixel_conv2d3') >>> y = sess.run(net.outputs, feed_dict={X: x}) >>> print(x.shape, y.shape) ... 
(2, 16, 16, 250) (2, 80, 80, 10) References ------------ - `Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional Neural Network <https://arxiv.org/pdf/1609.05158.pdf>`_ """ # github/Tetrachrome/subpixel https://github.com/Tetrachrome/subpixel/blob/master/subpixel.py _err_log = "SubpixelConv2d: The number of input channels == (scale x scale) x The number of output channels" scope_name = tf.get_variable_scope().name if scope_name: name = scope_name + '/' + name def _phase_shift(I, r): if tf.__version__ < '1.0': raise Exception("Only support TF1.0+") bsize, a, b, c = I.get_shape().as_list() bsize = tf.shape(I)[0] # Handling Dimension(None) type for undefined batch dim X = tf.reshape(I, (bsize, a, b, r, r)) X = tf.transpose(X, (0, 1, 2, 4, 3)) # bsize, a, b, 1, 1 # tf 0.12 # X = tf.split(1, a, X) # a, [bsize, b, r, r] # tf 0.12 X = tf.split(X, a, 1) # X = tf.concat(2, [tf.squeeze(x, axis=1) for x in X]) # bsize, b, a*r, r # tf 0.12 X = tf.concat([tf.squeeze(x, axis=1) for x in X], 2) # X = tf.split(1, b, X) # b, [bsize, a*r, r] # tf 0.12 X = tf.split(X, b, 1) # X = tf.concat(2, [tf.squeeze(x, axis=1) for x in X]) # bsize, a*r, b*r # tf 0.12 X = tf.concat([tf.squeeze(x, axis=1) for x in X], 2) return tf.reshape(X, (bsize, a*r, b*r, 1)) def _PS(X, r, n_out_channel): if n_out_channel > 1: assert int(X.get_shape()[-1]) == (r ** 2) * n_out_channel, _err_log Xc = tf.split(X, n_out_channel, 3) X = tf.concat([_phase_shift(x, r) for x in Xc], 3) elif n_out_channel == 1: assert int(X.get_shape()[-1]) == (r ** 2), _err_log X = _phase_shift(X, r) else: print(_err_log) return X inputs = net.outputs if n_out_channel is None: assert int(inputs.get_shape()[-1])/ (scale ** 2) % 1 == 0, _err_log n_out_channel = int(int(inputs.get_shape()[-1])/ (scale ** 2)) print(" [TL] SubpixelConv2d %s: scale: %d n_out_channel: %s act: %s" % (name, scale, n_out_channel, act.__name__)) net_new = Layer(inputs, name=name) # with tf.name_scope(name): with tf.variable_scope(name) as vs: net_new.outputs = act(_PS(inputs, r=scale, n_out_channel=n_out_channel)) net_new.all_layers = list(net.all_layers) net_new.all_params = list(net.all_params) net_new.all_drop = dict(net.all_drop) net_new.all_layers.extend( [net_new.outputs] ) return net_new ## Spatial Transformer Nets def transformer(U, theta, out_size, name='SpatialTransformer2dAffine', **kwargs): """Spatial Transformer Layer for `2D Affine Transformation <https://en.wikipedia.org/wiki/Affine_transformation>`_ , see :class:`SpatialTransformer2dAffineLayer` class. Parameters ---------- U : float The output of a convolutional net should have the shape [num_batch, height, width, num_channels]. theta: float The output of the localisation network should be [num_batch, 6], value range should be [0, 1] (via tanh). out_size: tuple of two ints The size of the output of the network (height, width) References ---------- - `Spatial Transformer Networks <https://arxiv.org/abs/1506.02025>`_ - `TensorFlow/Models <https://github.com/tensorflow/models/tree/master/transformer>`_ Notes ----- - To initialize the network to the identity transform init. >>> ``theta`` to >>> identity = np.array([[1., 0., 0.], ... 
[0., 1., 0.]]) >>> identity = identity.flatten() >>> theta = tf.Variable(initial_value=identity) """ def _repeat(x, n_repeats): with tf.variable_scope('_repeat'): rep = tf.transpose( tf.expand_dims(tf.ones(shape=tf.stack([n_repeats, ])), 1), [1, 0]) rep = tf.cast(rep, 'int32') x = tf.matmul(tf.reshape(x, (-1, 1)), rep) return tf.reshape(x, [-1]) def _interpolate(im, x, y, out_size): with tf.variable_scope('_interpolate'): # constants num_batch = tf.shape(im)[0] height = tf.shape(im)[1] width = tf.shape(im)[2] channels = tf.shape(im)[3] x = tf.cast(x, 'float32') y = tf.cast(y, 'float32') height_f = tf.cast(height, 'float32') width_f = tf.cast(width, 'float32') out_height = out_size[0] out_width = out_size[1] zero = tf.zeros([], dtype='int32') max_y = tf.cast(tf.shape(im)[1] - 1, 'int32') max_x = tf.cast(tf.shape(im)[2] - 1, 'int32') # scale indices from [-1, 1] to [0, width/height] x = (x + 1.0)*(width_f) / 2.0 y = (y + 1.0)*(height_f) / 2.0 # do sampling x0 = tf.cast(tf.floor(x), 'int32') x1 = x0 + 1 y0 = tf.cast(tf.floor(y), 'int32') y1 = y0 + 1 x0 = tf.clip_by_value(x0, zero, max_x) x1 = tf.clip_by_value(x1, zero, max_x) y0 = tf.clip_by_value(y0, zero, max_y) y1 = tf.clip_by_value(y1, zero, max_y) dim2 = width dim1 = width*height base = _repeat(tf.range(num_batch)*dim1, out_height*out_width) base_y0 = base + y0*dim2 base_y1 = base + y1*dim2 idx_a = base_y0 + x0 idx_b = base_y1 + x0 idx_c = base_y0 + x1 idx_d = base_y1 + x1 # use indices to lookup pixels in the flat image and restore # channels dim im_flat = tf.reshape(im, tf.stack([-1, channels])) im_flat = tf.cast(im_flat, 'float32') Ia = tf.gather(im_flat, idx_a) Ib = tf.gather(im_flat, idx_b) Ic = tf.gather(im_flat, idx_c) Id = tf.gather(im_flat, idx_d) # and finally calculate interpolated values x0_f = tf.cast(x0, 'float32') x1_f = tf.cast(x1, 'float32') y0_f = tf.cast(y0, 'float32') y1_f = tf.cast(y1, 'float32') wa = tf.expand_dims(((x1_f-x) * (y1_f-y)), 1) wb = tf.expand_dims(((x1_f-x) * (y-y0_f)), 1) wc = tf.expand_dims(((x-x0_f) * (y1_f-y)), 1) wd = tf.expand_dims(((x-x0_f) * (y-y0_f)), 1) output = tf.add_n([wa*Ia, wb*Ib, wc*Ic, wd*Id]) return output def _meshgrid(height, width): with tf.variable_scope('_meshgrid'): # This should be equivalent to: # x_t, y_t = np.meshgrid(np.linspace(-1, 1, width), # np.linspace(-1, 1, height)) # ones = np.ones(np.prod(x_t.shape)) # grid = np.vstack([x_t.flatten(), y_t.flatten(), ones]) x_t = tf.matmul(tf.ones(shape=tf.stack([height, 1])), tf.transpose(tf.expand_dims(tf.linspace(-1.0, 1.0, width), 1), [1, 0])) y_t = tf.matmul(tf.expand_dims(tf.linspace(-1.0, 1.0, height), 1), tf.ones(shape=tf.stack([1, width]))) x_t_flat = tf.reshape(x_t, (1, -1)) y_t_flat = tf.reshape(y_t, (1, -1)) ones = tf.ones_like(x_t_flat) grid = tf.concat(axis=0, values=[x_t_flat, y_t_flat, ones]) return grid def _transform(theta, input_dim, out_size): with tf.variable_scope('_transform'): num_batch = tf.shape(input_dim)[0] height = tf.shape(input_dim)[1] width = tf.shape(input_dim)[2] num_channels = tf.shape(input_dim)[3] theta = tf.reshape(theta, (-1, 2, 3)) theta = tf.cast(theta, 'float32') # grid of (x_t, y_t, 1), eq (1) in ref [1] height_f = tf.cast(height, 'float32') width_f = tf.cast(width, 'float32') out_height = out_size[0] out_width = out_size[1] grid = _meshgrid(out_height, out_width) grid = tf.expand_dims(grid, 0) grid = tf.reshape(grid, [-1]) grid = tf.tile(grid, tf.stack([num_batch])) grid = tf.reshape(grid, tf.stack([num_batch, 3, -1])) # Transform A x (x_t, y_t, 1)^T -> (x_s, y_s) T_g = tf.matmul(theta, 
grid) x_s = tf.slice(T_g, [0, 0, 0], [-1, 1, -1]) y_s = tf.slice(T_g, [0, 1, 0], [-1, 1, -1]) x_s_flat = tf.reshape(x_s, [-1]) y_s_flat = tf.reshape(y_s, [-1]) input_transformed = _interpolate( input_dim, x_s_flat, y_s_flat, out_size) output = tf.reshape( input_transformed, tf.stack([num_batch, out_height, out_width, num_channels])) return output with tf.variable_scope(name): output = _transform(theta, U, out_size) return output def batch_transformer(U, thetas, out_size, name='BatchSpatialTransformer2dAffine'): """Batch Spatial Transformer function for `2D Affine Transformation <https://en.wikipedia.org/wiki/Affine_transformation>`_. Parameters ---------- U : float tensor of inputs [batch, height, width, num_channels] thetas : float a set of transformations for each input [batch, num_transforms, 6] out_size : int the size of the output [out_height, out_width] Returns: float Tensor of size [batch * num_transforms, out_height, out_width, num_channels] """ with tf.variable_scope(name): num_batch, num_transforms = map(int, thetas.get_shape().as_list()[:2]) indices = [[i]*num_transforms for i in xrange(num_batch)] input_repeated = tf.gather(U, tf.reshape(indices, [-1])) return transformer(input_repeated, thetas, out_size) class SpatialTransformer2dAffineLayer(Layer): """The :class:`SpatialTransformer2dAffineLayer` class is a `Spatial Transformer Layer <https://arxiv.org/abs/1506.02025>`_ for `2D Affine Transformation <https://en.wikipedia.org/wiki/Affine_transformation>`_. Parameters ----------- layer : a layer class with 4-D Tensor of shape [batch, height, width, channels] theta_layer : a layer class for the localisation network. In this layer, we will use a :class:`DenseLayer` to make the theta size to [batch, 6], value range to [0, 1] (via tanh). out_size : tuple of two ints. The size of the output of the network (height, width), the feature maps will be resized by this. References ----------- - `Spatial Transformer Networks <https://arxiv.org/abs/1506.02025>`_ - `TensorFlow/Models <https://github.com/tensorflow/models/tree/master/transformer>`_ """ def __init__( self, layer = None, theta_layer = None, out_size = [40, 40], name ='sapatial_trans_2d_affine', ): Layer.__init__(self, name=name) self.inputs = layer.outputs self.theta_layer = theta_layer print(" [TL] SpatialTransformer2dAffineLayer %s: in_size:%s out_size:%s" % (name, self.inputs.get_shape().as_list(), out_size)) with tf.variable_scope(name) as vs: ## 1. make the localisation network to [batch, 6] via Flatten and Dense. if self.theta_layer.outputs.get_shape().ndims > 2: self.theta_layer.outputs = flatten_reshape(self.theta_layer.outputs, 'flatten') ## 2. To initialize the network to the identity transform init. # 2.1 W n_in = int(self.theta_layer.outputs.get_shape()[-1]) shape = (n_in, 6) W = tf.get_variable(name='W', initializer=tf.zeros(shape)) # 2.2 b identity = tf.constant(np.array([[1., 0, 0], [0, 1., 0]]).astype('float32').flatten()) b = tf.get_variable(name='b', initializer=identity) # 2.3 transformation matrix self.theta = tf.nn.tanh(tf.matmul(self.theta_layer.outputs, W) + b) ## 3. Spatial Transformer Sampling # 3.1 transformation self.outputs = transformer(self.inputs, self.theta, out_size=out_size) # 3.2 automatically set batch_size and channels # e.g. [?, 40, 40, ?] 
--> [64, 40, 40, 1] or [64, 20, 20, 4]/ Hao Dong # fixed_batch_size = self.inputs.get_shape().with_rank_at_least(1)[0] if fixed_batch_size.value: batch_size = fixed_batch_size.value else: from tensorflow.python.ops import array_ops batch_size = array_ops.shape(self.inputs)[0] size = self.inputs.get_shape().as_list() n_channels = self.inputs.get_shape().as_list()[-1] # print(self.outputs) self.outputs = tf.reshape(self.outputs, shape=[batch_size, out_size[0], out_size[1], n_channels]) # print(self.outputs) # exit() ## 4. Get all parameters variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name) ## fixed self.all_layers = list(layer.all_layers) self.all_params = list(layer.all_params) self.all_drop = dict(layer.all_drop) ## theta_layer self.all_layers.extend(theta_layer.all_layers) self.all_params.extend(theta_layer.all_params) self.all_drop.update(theta_layer.all_drop) ## this layer self.all_layers.extend( [self.outputs] ) self.all_params.extend( variables ) # ## Normalization layer class LocalResponseNormLayer(Layer): """The :class:`LocalResponseNormLayer` class is for Local Response Normalization, see ``tf.nn.local_response_normalization`` or ``tf.nn.lrn`` for new TF version. The 4-D input tensor is treated as a 3-D array of 1-D vectors (along the last dimension), and each vector is normalized independently. Within a given vector, each component is divided by the weighted, squared sum of inputs within depth_radius. Parameters ----------- layer : a layer class. Must be one of the following types: float32, half. 4-D. depth_radius : An optional int. Defaults to 5. 0-D. Half-width of the 1-D normalization window. bias : An optional float. Defaults to 1. An offset (usually positive to avoid dividing by 0). alpha : An optional float. Defaults to 1. A scale factor, usually positive. beta : An optional float. Defaults to 0.5. An exponent. name : A string or None, an optional name to attach to this layer. """ def __init__( self, layer = None, depth_radius = None, bias = None, alpha = None, beta = None, name ='lrn_layer', ): Layer.__init__(self, name=name) self.inputs = layer.outputs print(" [TL] LocalResponseNormLayer %s: depth_radius: %d, bias: %f, alpha: %f, beta: %f" % (self.name, depth_radius, bias, alpha, beta)) with tf.variable_scope(name) as vs: self.outputs = tf.nn.lrn(self.inputs, depth_radius=depth_radius, bias=bias, alpha=alpha, beta=beta) self.all_layers = list(layer.all_layers) self.all_params = list(layer.all_params) self.all_drop = dict(layer.all_drop) self.all_layers.extend( [self.outputs] ) class BatchNormLayer(Layer): """ The :class:`BatchNormLayer` class is a normalization layer, see ``tf.nn.batch_normalization`` and ``tf.nn.moments``. Batch normalization on fully-connected or convolutional maps. Parameters ----------- layer : a :class:`Layer` instance The `Layer` class feeding into this layer. decay : float, default is 0.9. A decay factor for ExponentialMovingAverage, use larger value for large dataset. epsilon : float A small float number to avoid dividing by 0. act : activation function. is_train : boolean Whether train or inference. beta_init : beta initializer The initializer for initializing beta gamma_init : gamma initializer The initializer for initializing gamma name : a string or None An optional name to attach to this layer. 
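    Examples
    ---------
    A minimal usage sketch (``network`` is assumed to be an existing
    :class:`Layer`, e.g. the output of a :class:`Conv2dLayer`, and ``is_train``
    a Python boolean set by your training script; the layer name is illustrative):

    >>> network = tl.layers.BatchNormLayer(network, decay=0.9, epsilon=1e-5,
    ...                 act=tf.nn.relu, is_train=is_train, name='batchnorm_layer1')
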
References ---------- - `Source <https://github.com/ry/tensorflow-resnet/blob/master/resnet.py>`_ - `stackoverflow <http://stackoverflow.com/questions/38312668/how-does-one-do-inference-with-batch-normalization-with-tensor-flow>`_ """ def __init__( self, layer = None, decay = 0.9, epsilon = 0.00001, act = tf.identity, is_train = False, beta_init = tf.zeros_initializer, gamma_init = tf.random_normal_initializer(mean=1.0, stddev=0.002), # tf.ones_initializer, name ='batchnorm_layer', ): Layer.__init__(self, name=name) self.inputs = layer.outputs print(" [TL] BatchNormLayer %s: decay:%f epsilon:%f act:%s is_train:%s" % (self.name, decay, epsilon, act.__name__, is_train)) x_shape = self.inputs.get_shape() params_shape = x_shape[-1:] from tensorflow.python.training import moving_averages from tensorflow.python.ops import control_flow_ops with tf.variable_scope(name) as vs: axis = list(range(len(x_shape) - 1)) ## 1. beta, gamma if tf.__version__ > '0.12.1' and beta_init == tf.zeros_initializer: beta_init = beta_init() beta = tf.get_variable('beta', shape=params_shape, initializer=beta_init, trainable=is_train)#, restore=restore) gamma = tf.get_variable('gamma', shape=params_shape, initializer=gamma_init, trainable=is_train, )#restore=restore) ## 2. if tf.__version__ > '0.12.1': moving_mean_init = tf.zeros_initializer() else: moving_mean_init = tf.zeros_initializer moving_mean = tf.get_variable('moving_mean', params_shape, initializer=moving_mean_init, trainable=False,)# restore=restore) moving_variance = tf.get_variable('moving_variance', params_shape, initializer=tf.constant_initializer(1.), trainable=False,)# restore=restore) ## 3. # These ops will only be preformed when training. mean, variance = tf.nn.moments(self.inputs, axis) try: # TF12 update_moving_mean = moving_averages.assign_moving_average( moving_mean, mean, decay, zero_debias=False) # if zero_debias=True, has bias update_moving_variance = moving_averages.assign_moving_average( moving_variance, variance, decay, zero_debias=False) # if zero_debias=True, has bias # print("TF12 moving") except Exception as e: # TF11 update_moving_mean = moving_averages.assign_moving_average( moving_mean, mean, decay) update_moving_variance = moving_averages.assign_moving_average( moving_variance, variance, decay) # print("TF11 moving") def mean_var_with_update(): with tf.control_dependencies([update_moving_mean, update_moving_variance]): return tf.identity(mean), tf.identity(variance) if is_train: mean, var = mean_var_with_update() self.outputs = act( tf.nn.batch_normalization(self.inputs, mean, var, beta, gamma, epsilon) ) else: self.outputs = act( tf.nn.batch_normalization(self.inputs, moving_mean, moving_variance, beta, gamma, epsilon) ) variables = [beta, gamma, moving_mean, moving_variance] # print(len(variables)) # for idx, v in enumerate(variables): # print(" var {:3}: {:15} {}".format(idx, str(v.get_shape()), v)) # exit() self.all_layers = list(layer.all_layers) self.all_params = list(layer.all_params) self.all_drop = dict(layer.all_drop) self.all_layers.extend( [self.outputs] ) self.all_params.extend( variables ) # class BatchNormLayer_TF(Layer): # Work well TF contrib https://github.com/tensorflow/tensorflow/blob/b826b79718e3e93148c3545e7aa3f90891744cc0/tensorflow/contrib/layers/python/layers/layers.py#L100 # """ # The :class:`BatchNormLayer` class is a normalization layer, see ``tf.nn.batch_normalization`` and ``tf.nn.moments``. # # Batch normalization on fully-connected or convolutional maps. 
# # Parameters # ----------- # layer : a :class:`Layer` instance # The `Layer` class feeding into this layer. # decay : float # A decay factor for ExponentialMovingAverage. # center: If True, subtract `beta`. If False, `beta` is ignored. # scale: If True, multiply by `gamma`. If False, `gamma` is # not used. When the next layer is linear (also e.g. `nn.relu`), this can be # disabled since the scaling can be done by the next layer. # epsilon : float # A small float number to avoid dividing by 0. # act : activation function. # is_train : boolean # Whether train or inference. # beta_init : beta initializer # The initializer for initializing beta # gamma_init : gamma initializer # The initializer for initializing gamma # name : a string or None # An optional name to attach to this layer. # # References # ---------- # - `Source <https://github.com/ry/tensorflow-resnet/blob/master/resnet.py>`_ # - `stackoverflow <http://stackoverflow.com/questions/38312668/how-does-one-do-inference-with-batch-normalization-with-tensor-flow>`_ # """ # def __init__( # self, # layer = None, # decay = 0.95,#.999, # center = True, # scale = True, # epsilon = 0.00001, # act = tf.identity, # is_train = False, # beta_init = tf.zeros_initializer, # # gamma_init = tf.ones_initializer, # gamma_init = tf.random_normal_initializer(mean=1.0, stddev=0.002), # name ='batchnorm_layer', # ): # Layer.__init__(self, name=name) # self.inputs = layer.outputs # print(" [TL] BatchNormLayer %s: decay: %f, epsilon: %f, act: %s, is_train: %s" % # (self.name, decay, epsilon, act.__name__, is_train)) # from tensorflow.contrib.layers.python.layers import utils # from tensorflow.contrib.framework.python.ops import variables # from tensorflow.python.ops import init_ops # from tensorflow.python.ops import nn # from tensorflow.python.training import moving_averages # from tensorflow.python.framework import ops # from tensorflow.python.ops import variable_scope # variables_collections = None # outputs_collections=None # updates_collections=None#ops.GraphKeys.UPDATE_OPS # # with variable_scope.variable_op_scope([inputs], # # scope, 'BatchNorm', reuse=reuse) as sc: # # with variable_scope.variable_op_scope([self.inputs], None, name) as vs: # with tf.variable_scope(name) as vs: # inputs_shape = self.inputs.get_shape() # dtype = self.inputs.dtype.base_dtype # axis = list(range(len(inputs_shape) - 1)) # [0, 1, 2] # params_shape = inputs_shape[-1:] # # Allocate parameters for the beta and gamma of the normalization. # beta, gamma = None, None # if center: # beta_collections = utils.get_variable_collections(variables_collections, # 'beta') # beta = variables.model_variable('beta', # shape=params_shape, # dtype=dtype, # # initializer=init_ops.zeros_initializer, # initializer=beta_init, # collections=beta_collections,) # # trainable=trainable) # if scale: # gamma_collections = utils.get_variable_collections(variables_collections, # 'gamma') # gamma = variables.model_variable('gamma', # shape=params_shape, # dtype=dtype, # # initializer=init_ops.ones_initializer, # initializer=gamma_init, # collections=gamma_collections,) # # trainable=trainable) # # Create moving_mean and moving_variance variables and add them to the # # appropiate collections. 
# moving_mean_collections = utils.get_variable_collections( # variables_collections, # 'moving_mean') # moving_mean = variables.model_variable( # 'moving_mean', # shape=params_shape, # dtype=dtype, # # initializer=init_ops.zeros_initializer, # initializer=tf.zeros_initializer, # trainable=False, # collections=moving_mean_collections) # moving_variance_collections = utils.get_variable_collections( # variables_collections, # 'moving_variance') # moving_variance = variables.model_variable( # 'moving_variance', # shape=params_shape, # dtype=dtype, # # initializer=init_ops.ones_initializer, # initializer=tf.constant_initializer(1.), # trainable=False, # collections=moving_variance_collections) # if is_train: # # Calculate the moments based on the individual batch. # mean, variance = nn.moments(self.inputs, axis, shift=moving_mean) # # Update the moving_mean and moving_variance moments. # # update_moving_mean = moving_averages.assign_moving_average( # # moving_mean, mean, decay) # # update_moving_variance = moving_averages.assign_moving_average( # # moving_variance, variance, decay) # # if updates_collections is None: # # # Make sure the updates are computed here. # # with ops.control_dependencies([update_moving_mean, # # update_moving_variance]): # # outputs = nn.batch_normalization( # # self.inputs, mean, variance, beta, gamma, epsilon) # # update_moving_mean = tf.assign(moving_mean, # moving_mean * decay + mean * (1 - decay)) # update_moving_variance = tf.assign(moving_variance, # moving_variance * decay + variance * (1 - decay)) # with tf.control_dependencies([update_moving_mean, update_moving_variance]): # outputs = nn.batch_normalization( # self.inputs, mean, variance, beta, gamma, epsilon) # # else: # # # Collect the updates to be computed later. # # ops.add_to_collections(updates_collections, update_moving_mean) # # ops.add_to_collections(updates_collections, update_moving_variance) # # outputs = nn.batch_normalization( # # self.inputs, mean, variance, beta, gamma, epsilon) # else: # # mean, variance = nn.moments(self.inputs, axis, shift=moving_mean) # outputs = nn.batch_normalization( # self.inputs, moving_mean, moving_variance, beta, gamma, epsilon) # # self.inputs, mean, variance, beta, gamma, epsilon) # outputs.set_shape(self.inputs.get_shape()) # # if activation_fn: # self.outputs = act(outputs) # # # variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name) # # return utils.collect_named_outputs(outputs_collections, sc.name, outputs) # variables = [beta, gamma, moving_mean, moving_variance] # # mean, variance = nn.moments(self.inputs, axis, shift=moving_mean) # self.check_mean = mean # self.check_variance = variance # # self.all_layers = list(layer.all_layers) # self.all_params = list(layer.all_params) # self.all_drop = dict(layer.all_drop) # self.all_layers.extend( [self.outputs] ) # self.all_params.extend( variables ) # # class BatchNormLayer5(Layer): # Akara Work well # """ # The :class:`BatchNormLayer` class is a normalization layer, see ``tf.nn.batch_normalization`` and ``tf.nn.moments``. # # Batch normalization on fully-connected or convolutional maps. # # Parameters # ----------- # layer : a :class:`Layer` instance # The `Layer` class feeding into this layer. # decay : float # A decay factor for ExponentialMovingAverage. # epsilon : float # A small float number to avoid dividing by 0. # act : activation function. # is_train : boolean # Whether train or inference. 
# beta_init : beta initializer # The initializer for initializing beta # gamma_init : gamma initializer # The initializer for initializing gamma # name : a string or None # An optional name to attach to this layer. # # References # ---------- # - `Source <https://github.com/ry/tensorflow-resnet/blob/master/resnet.py>`_ # - `stackoverflow <http://stackoverflow.com/questions/38312668/how-does-one-do-inference-with-batch-normalization-with-tensor-flow>`_ # """ # def __init__( # self, # layer = None, # decay = 0.9, # epsilon = 0.00001, # act = tf.identity, # is_train = False, # beta_init = tf.zeros_initializer, # # gamma_init = tf.ones_initializer, # gamma_init = tf.random_normal_initializer(mean=1.0, stddev=0.002), # name ='batchnorm_layer', # ): # Layer.__init__(self, name=name) # self.inputs = layer.outputs # print(" [TL] BatchNormLayer %s: decay: %f, epsilon: %f, act: %s, is_train: %s" % # (self.name, decay, epsilon, act.__name__, is_train)) # x_shape = self.inputs.get_shape() # params_shape = x_shape[-1:] # # from tensorflow.python.training import moving_averages # from tensorflow.python.ops import control_flow_ops # # with tf.variable_scope(name) as vs: # axis = list(range(len(x_shape) - 1)) # # ## 1. beta, gamma # beta = tf.get_variable('beta', shape=params_shape, # initializer=beta_init, # trainable=is_train)#, restore=restore) # # gamma = tf.get_variable('gamma', shape=params_shape, # initializer=gamma_init, trainable=is_train, # )#restore=restore) # # ## 2. moving variables during training (not update by gradient!) # moving_mean = tf.get_variable('moving_mean', # params_shape, # initializer=tf.zeros_initializer, # trainable=False,)# restore=restore) # moving_variance = tf.get_variable('moving_variance', # params_shape, # initializer=tf.constant_initializer(1.), # trainable=False,)# restore=restore) # # batch_mean, batch_var = tf.nn.moments(self.inputs, axis) # ## 3. # # These ops will only be preformed when training. 
# def mean_var_with_update(): # try: # TF12 # update_moving_mean = moving_averages.assign_moving_average( # moving_mean, batch_mean, decay, zero_debias=False) # if zero_debias=True, has bias # update_moving_variance = moving_averages.assign_moving_average( # moving_variance, batch_var, decay, zero_debias=False) # if zero_debias=True, has bias # # print("TF12 moving") # except Exception as e: # TF11 # update_moving_mean = moving_averages.assign_moving_average( # moving_mean, batch_mean, decay) # update_moving_variance = moving_averages.assign_moving_average( # moving_variance, batch_var, decay) # # print("TF11 moving") # # # def mean_var_with_update(): # with tf.control_dependencies([update_moving_mean, update_moving_variance]): # # return tf.identity(update_moving_mean), tf.identity(update_moving_variance) # return tf.identity(batch_mean), tf.identity(batch_var) # # # if not is_train: # if is_train: # mean, var = mean_var_with_update() # else: # mean, var = (moving_mean, moving_variance) # # normed = tf.nn.batch_normalization( # x=self.inputs, # mean=mean, # variance=var, # offset=beta, # scale=gamma, # variance_epsilon=epsilon, # name="tf_bn" # ) # self.outputs = act( normed ) # # variables = [beta, gamma, moving_mean, moving_variance] # # print(len(variables)) # # for idx, v in enumerate(variables): # # print(" var {:3}: {:15} {}".format(idx, str(v.get_shape()), v)) # # exit() # # self.all_layers = list(layer.all_layers) # self.all_params = list(layer.all_params) # self.all_drop = dict(layer.all_drop) # self.all_layers.extend( [self.outputs] ) # self.all_params.extend( variables ) # # self.all_params.extend( [beta, gamma] ) # # class BatchNormLayer4(Layer): # work TFlearn https://github.com/tflearn/tflearn/blob/master/tflearn/layers/normalization.py # """ # The :class:`BatchNormLayer` class is a normalization layer, see ``tf.nn.batch_normalization`` and ``tf.nn.moments``. # # Batch normalization on fully-connected or convolutional maps. # # Parameters # ----------- # layer : a :class:`Layer` instance # The `Layer` class feeding into this layer. # decay : float # A decay factor for ExponentialMovingAverage. # epsilon : float # A small float number to avoid dividing by 0. # act : activation function. # is_train : boolean # Whether train or inference. # beta_init : beta initializer # The initializer for initializing beta # gamma_init : gamma initializer # The initializer for initializing gamma # name : a string or None # An optional name to attach to this layer. 
# # References # ---------- # - `Source <https://github.com/ry/tensorflow-resnet/blob/master/resnet.py>`_ # - `stackoverflow <http://stackoverflow.com/questions/38312668/how-does-one-do-inference-with-batch-normalization-with-tensor-flow>`_ # """ # def __init__( # self, # layer = None, # decay = 0.999, # epsilon = 0.00001, # act = tf.identity, # is_train = None, # beta_init = tf.zeros_initializer, # # gamma_init = tf.ones_initializer, # gamma_init = tf.random_normal_initializer(mean=1.0, stddev=0.002), # name ='batchnorm_layer', # ): # Layer.__init__(self, name=name) # self.inputs = layer.outputs # print(" [TL] BatchNormLayer %s: decay: %f, epsilon: %f, act: %s, is_train: %s" % # (self.name, decay, epsilon, act.__name__, is_train)) # input_shape = self.inputs.get_shape() # # params_shape = input_shape[-1:] # input_ndim = len(input_shape) # from tensorflow.python.training import moving_averages # from tensorflow.python.ops import control_flow_ops # # # gamma_init = tf.random_normal_initializer(mean=gamma, stddev=stddev) # # # Variable Scope fix for older TF # scope = name # try: # vscope = tf.variable_scope(scope, default_name=name, values=[self.inputs],) # # reuse=reuse) # except Exception: # vscope = tf.variable_op_scope([self.inputs], scope, name)#, reuse=reuse) # # with vscope as scope: # name = scope.name # # with tf.variable_scope(name) as vs: # beta = tf.get_variable('beta', shape=[input_shape[-1]], # initializer=beta_init,) # # initializer=tf.constant_initializer(beta),) # # trainable=trainable, )#restore=restore) # gamma = tf.get_variable('gamma', shape=[input_shape[-1]], # initializer=gamma_init, )#trainable=trainable,) # # restore=restore) # # axis = list(range(input_ndim - 1)) # moving_mean = tf.get_variable('moving_mean', # input_shape[-1:], # initializer=tf.zeros_initializer, # trainable=False,) # # restore=restore) # moving_variance = tf.get_variable('moving_variance', # input_shape[-1:], # initializer=tf.constant_initializer(1.), # trainable=False,) # # restore=restore) # # # Define a function to update mean and variance # def update_mean_var(): # mean, variance = tf.nn.moments(self.inputs, axis) # # # Fix TF 0.12 # try: # update_moving_mean = moving_averages.assign_moving_average( # moving_mean, mean, decay, zero_debias=False) # if zero_debias=True, accuracy is high .. 
# update_moving_variance = moving_averages.assign_moving_average( # moving_variance, variance, decay, zero_debias=False) # except Exception as e: # TF 11 # update_moving_mean = moving_averages.assign_moving_average( # moving_mean, mean, decay) # update_moving_variance = moving_averages.assign_moving_average( # moving_variance, variance, decay) # # with tf.control_dependencies( # [update_moving_mean, update_moving_variance]): # return tf.identity(mean), tf.identity(variance) # # # Retrieve variable managing training mode # # is_training = tflearn.get_training_mode() # if not is_train: # test : mean=0, std=1 # # if is_train: # train : mean=0, std=1 # is_training = tf.cast(tf.ones([]), tf.bool) # else: # is_training = tf.cast(tf.zeros([]), tf.bool) # mean, var = tf.cond( # is_training, update_mean_var, lambda: (moving_mean, moving_variance)) # # ones zeros # try: # inference = tf.nn.batch_normalization( # self.inputs, mean, var, beta, gamma, epsilon) # inference.set_shape(input_shape) # # Fix for old Tensorflow # except Exception as e: # inference = tf.nn.batch_norm_with_global_normalization( # self.inputs, mean, var, beta, gamma, epsilon, # scale_after_normalization=True, # ) # inference.set_shape(input_shape) # # variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope.name) # 2 params beta, gamma # # variables = [beta, gamma, moving_mean, moving_variance] # # # print(len(variables)) # # for idx, v in enumerate(variables): # # print(" var {:3}: {:15} {}".format(idx, str(v.get_shape()), v.name)) # # exit() # # # Add attributes for easy access # # inference.scope = scope # inference.scope = name # inference.beta = beta # inference.gamma = gamma # # self.outputs = act( inference ) # # self.all_layers = list(layer.all_layers) # self.all_params = list(layer.all_params) # self.all_drop = dict(layer.all_drop) # self.all_layers.extend( [self.outputs] ) # self.all_params.extend( variables ) # class BatchNormLayer2(Layer): # don't work http://r2rt.com/implementing-batch-normalization-in-tensorflow.html # """ # The :class:`BatchNormLayer` class is a normalization layer, see ``tf.nn.batch_normalization`` and ``tf.nn.moments``. # # Batch normalization on fully-connected or convolutional maps. # # Parameters # ----------- # layer : a :class:`Layer` instance # The `Layer` class feeding into this layer. # decay : float # A decay factor for ExponentialMovingAverage. # epsilon : float # A small float number to avoid dividing by 0. # act : activation function. # is_train : boolean # Whether train or inference. # beta_init : beta initializer # The initializer for initializing beta # gamma_init : gamma initializer # The initializer for initializing gamma # name : a string or None # An optional name to attach to this layer. 
# # References # ---------- # - `Source <https://github.com/ry/tensorflow-resnet/blob/master/resnet.py>`_ # - `stackoverflow <http://stackoverflow.com/questions/38312668/how-does-one-do-inference-with-batch-normalization-with-tensor-flow>`_ # """ # def __init__( # self, # layer = None, # decay = 0.999, # epsilon = 0.00001, # act = tf.identity, # is_train = None, # beta_init = tf.zeros_initializer, # # gamma_init = tf.ones_initializer, # gamma_init = tf.random_normal_initializer(mean=1.0, stddev=0.002), # name ='batchnorm_layer', # ): # Layer.__init__(self, name=name) # self.inputs = layer.outputs # print(" [TL] BatchNormLayer %s: decay: %f, epsilon: %f, act: %s, is_train: %s" % # (self.name, decay, epsilon, act.__name__, is_train)) # x_shape = self.inputs.get_shape() # params_shape = x_shape[-1:] # # with tf.variable_scope(name) as vs: # gamma = tf.get_variable("gamma", shape=params_shape, # initializer=gamma_init) # beta = tf.get_variable("beta", shape=params_shape, # initializer=beta_init) # pop_mean = tf.get_variable("pop_mean", shape=params_shape, # initializer=tf.zeros_initializer, trainable=False) # pop_var = tf.get_variable("pop_var", shape=params_shape, # initializer=tf.constant_initializer(1.), trainable=False) # # if is_train: # batch_mean, batch_var = tf.nn.moments(self.inputs, list(range(len(x_shape) - 1))) # train_mean = tf.assign(pop_mean, # pop_mean * decay + batch_mean * (1 - decay)) # train_var = tf.assign(pop_var, # pop_var * decay + batch_var * (1 - decay)) # with tf.control_dependencies([train_mean, train_var]): # self.outputs = act(tf.nn.batch_normalization(self.inputs, # batch_mean, batch_var, beta, gamma, epsilon)) # else: # self.outputs = act(tf.nn.batch_normalization(self.inputs, # pop_mean, pop_var, beta, gamma, epsilon)) # # self.outputs = act( tf.nn.batch_normalization(self.inputs, mean, variance, beta, gamma, epsilon) ) # # variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name) # 8 params in TF12 if zero_debias=True # variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=vs.name) # 2 params beta, gamma # # variables = [beta, gamma, moving_mean, moving_variance] # # # print(len(variables)) # # for idx, v in enumerate(variables): # # print(" var {:3}: {:15} {}".format(idx, str(v.get_shape()), v.name)) # # exit() # # self.all_layers = list(layer.all_layers) # self.all_params = list(layer.all_params) # self.all_drop = dict(layer.all_drop) # self.all_layers.extend( [self.outputs] ) # self.all_params.extend( variables ) # class BatchNormLayer3(Layer): # don't work http://r2rt.com/implementing-batch-normalization-in-tensorflow.html # """ # The :class:`BatchNormLayer` class is a normalization layer, see ``tf.nn.batch_normalization`` and ``tf.nn.moments``. # # Batch normalization on fully-connected or convolutional maps. # # Parameters # ----------- # layer : a :class:`Layer` instance # The `Layer` class feeding into this layer. # decay : float # A decay factor for ExponentialMovingAverage. # epsilon : float # A small float number to avoid dividing by 0. # act : activation function. # is_train : boolean # Whether train or inference. # beta_init : beta initializer # The initializer for initializing beta # gamma_init : gamma initializer # The initializer for initializing gamma # name : a string or None # An optional name to attach to this layer. 
# # References # ---------- # - `Source <https://github.com/ry/tensorflow-resnet/blob/master/resnet.py>`_ # - `stackoverflow <http://stackoverflow.com/questions/38312668/how-does-one-do-inference-with-batch-normalization-with-tensor-flow>`_ # """ # def __init__( # self, # layer = None, # decay = 0.999, # epsilon = 0.00001, # act = tf.identity, # is_train = None, # beta_init = tf.zeros_initializer, # # gamma_init = tf.ones_initializer, # gamma_init = tf.random_normal_initializer(mean=1.0, stddev=0.002), # name ='batchnorm_layer', # ): # """ # Batch normalization on convolutional maps. # Ref.: http://stackoverflow.com/questions/33949786/how-could-i-use-batch-normalization-in-tensorflow # Args: # x: Tensor, 4D BHWD input maps # n_out: integer, depth of input maps # phase_train: boolean tf.Varialbe, true indicates training phase # scope: string, variable scope # Return: # normed: batch-normalized maps # """ # Layer.__init__(self, name=name) # self.inputs = layer.outputs # print(" [TL] BatchNormLayer %s: decay: %f, epsilon: %f, act: %s, is_train: %s" % # (self.name, decay, epsilon, act.__name__, is_train)) # x_shape = self.inputs.get_shape() # params_shape = x_shape[-1:] # # if is_train: # phase_train = tf.cast(tf.ones([]), tf.bool) # else: # phase_train = tf.cast(tf.zeros([]), tf.bool) # # with tf.variable_scope(name) as vs: # gamma = tf.get_variable("gamma", shape=params_shape, # initializer=gamma_init) # beta = tf.get_variable("beta", shape=params_shape, # initializer=beta_init) # batch_mean, batch_var = tf.nn.moments(self.inputs, list(range(len(x_shape) - 1)),#[0,1,2], # name='moments') # ema = tf.train.ExponentialMovingAverage(decay=decay) # # def mean_var_with_update(): # ema_apply_op = ema.apply([batch_mean, batch_var]) # with tf.control_dependencies([ema_apply_op]): # return tf.identity(batch_mean), tf.identity(batch_var) # # mean, var = tf.cond(phase_train, # mean_var_with_update, # lambda: (ema.average(batch_mean), ema.average(batch_var))) # normed = tf.nn.batch_normalization(self.inputs, mean, var, beta, gamma, epsilon) # self.outputs = act( normed ) # variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=vs.name) # 2 params beta, gamma # # variables = [beta, gamma, moving_mean, moving_variance] # # # print(len(variables)) # # for idx, v in enumerate(variables): # # print(" var {:3}: {:15} {}".format(idx, str(v.get_shape()), v.name)) # # exit() # # self.all_layers = list(layer.all_layers) # self.all_params = list(layer.all_params) # self.all_drop = dict(layer.all_drop) # self.all_layers.extend( [self.outputs] ) # self.all_params.extend( variables ) # class BatchNormLayer_old(Layer): # don't work # """ # The :class:`BatchNormLayer` class is a normalization layer, see ``tf.nn.batch_normalization``. # # Batch normalization on fully-connected or convolutional maps. # # Parameters # ----------- # layer : a :class:`Layer` instance # The `Layer` class feeding into this layer. # decay : float # A decay factor for ExponentialMovingAverage. # epsilon : float # A small float number to avoid dividing by 0. # is_train : boolean # Whether train or inference. # name : a string or None # An optional name to attach to this layer. 
# # References # ---------- # - `tf.nn.batch_normalization <https://github.com/tensorflow/tensorflow/blob/master/tensorflow/g3doc/api_docs/python/functions_and_classes/shard8/tf.nn.batch_normalization.md>`_ # - `stackoverflow <http://stackoverflow.com/questions/33949786/how-could-i-use-batch-normalization-in-tensorflow>`_ # - `tensorflow.contrib <https://github.com/tensorflow/tensorflow/blob/b826b79718e3e93148c3545e7aa3f90891744cc0/tensorflow/contrib/layers/python/layers/layers.py#L100>`_ # """ # def __init__( # self, # layer = None, # act = tf.identity, # decay = 0.999, # epsilon = 0.001, # is_train = None, # name ='batchnorm_layer', # ): # Layer.__init__(self, name=name) # self.inputs = layer.outputs # print(" [TL] BatchNormLayer %s: decay: %f, epsilon: %f, is_train: %s" % # (self.name, decay, epsilon, is_train)) # if is_train == None: # raise Exception("is_train must be True or False") # # # (name, input_var, decay, epsilon, is_train) # inputs_shape = self.inputs.get_shape() # axis = list(range(len(inputs_shape) - 1)) # params_shape = inputs_shape[-1:] # # with tf.variable_scope(name) as vs: # beta = tf.get_variable(name='beta', shape=params_shape, # initializer=tf.constant_initializer(0.0)) # gamma = tf.get_variable(name='gamma', shape=params_shape, # initializer=tf.constant_initializer(1.0)) # batch_mean, batch_var = tf.nn.moments(self.inputs, # axis, # name='moments') # ema = tf.train.ExponentialMovingAverage(decay=decay) # # def mean_var_with_update(): # ema_apply_op = ema.apply([batch_mean, batch_var]) # with tf.control_dependencies([ema_apply_op]): # return tf.identity(batch_mean), tf.identity(batch_var) # # if is_train: # is_train = tf.cast(tf.ones(1), tf.bool) # else: # is_train = tf.cast(tf.zeros(1), tf.bool) # # is_train = tf.reshape(is_train, []) # # # print(is_train) # # exit() # # mean, var = tf.cond( # is_train, # mean_var_with_update, # lambda: (ema.average(batch_mean), ema.average(batch_var)) # ) # normed = tf.nn.batch_normalization( # x=self.inputs, # mean=mean, # variance=var, # offset=beta, # scale=gamma, # variance_epsilon=epsilon, # name='tf_bn' # ) # self.outputs = act( normed ) # # self.all_layers = list(layer.all_layers) # self.all_params = list(layer.all_params) # self.all_drop = dict(layer.all_drop) # self.all_layers.extend( [self.outputs] ) # self.all_params.extend( [beta, gamma] ) ## Pooling layer class PoolLayer(Layer): """ The :class:`PoolLayer` class is a Pooling layer, you can choose ``tf.nn.max_pool`` and ``tf.nn.avg_pool`` for 2D or ``tf.nn.max_pool3d`` and ``tf.nn.avg_pool3d`` for 3D. Parameters ---------- layer : a :class:`Layer` instance The `Layer` class feeding into this layer. ksize : a list of ints that has length >= 4. The size of the window for each dimension of the input tensor. strides : a list of ints that has length >= 4. The stride of the sliding window for each dimension of the input tensor. padding : a string from: "SAME", "VALID". The type of padding algorithm to use. pool : a pooling function - see `TensorFlow pooling APIs <https://www.tensorflow.org/versions/master/api_docs/python/nn.html#pooling>`_ - class ``tf.nn.max_pool`` - class ``tf.nn.avg_pool`` - class ``tf.nn.max_pool3d`` - class ``tf.nn.avg_pool3d`` name : a string or None An optional name to attach to this layer. Examples -------- - see :class:`Conv2dLayer`. 
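    - A 2-D max-pooling sketch (``network`` is assumed to be an existing
      :class:`Layer` holding 4-D feature maps, e.g. the output of a
      :class:`Conv2dLayer`; the layer name is illustrative):

    >>> network = tl.layers.PoolLayer(network,
    ...                    ksize=[1, 2, 2, 1],
    ...                    strides=[1, 2, 2, 1],
    ...                    padding='SAME',
    ...                    pool = tf.nn.max_pool,
    ...                    name ='pool_layer1')

    - For 3-D inputs, ``tf.nn.max_pool3d`` or ``tf.nn.avg_pool3d`` can be passed as
      ``pool`` together with 5-element ``ksize`` and ``strides`` lists.
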
""" def __init__( self, layer = None, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', pool = tf.nn.max_pool, name ='pool_layer', ): Layer.__init__(self, name=name) self.inputs = layer.outputs print(" [TL] PoolLayer %s: ksize:%s strides:%s padding:%s pool:%s" % (self.name, str(ksize), str(strides), padding, pool.__name__)) self.outputs = pool(self.inputs, ksize=ksize, strides=strides, padding=padding, name=name) self.all_layers = list(layer.all_layers) self.all_params = list(layer.all_params) self.all_drop = dict(layer.all_drop) self.all_layers.extend( [self.outputs] ) ## Padding layer class PadLayer(Layer): """ The :class:`PadLayer` class is a Padding layer for any modes and dimensions. Please see `tf.pad <https://www.tensorflow.org/api_docs/python/tf/pad>`_ for usage. Parameters ---------- layer : a :class:`Layer` instance The `Layer` class feeding into this layer. padding : a Tensor of type int32. mode : one of "CONSTANT", "REFLECT", or "SYMMETRIC" (case-insensitive) name : a string or None An optional name to attach to this layer. """ def __init__( self, layer = None, paddings = None, mode = 'CONSTANT', name = 'pad_layer', ): Layer.__init__(self, name=name) assert paddings is not None, "paddings should be a Tensor of type int32. see https://www.tensorflow.org/api_docs/python/tf/pad" self.inputs = layer.outputs print(" [TL] PadLayer %s: paddings:%s mode:%s" % (self.name, list(paddings.get_shape()), mode)) self.outputs = tf.pad(self.inputs, paddings=paddings, mode=mode, name=name) self.all_layers = list(layer.all_layers) self.all_params = list(layer.all_params) self.all_drop = dict(layer.all_drop) self.all_layers.extend( [self.outputs] ) ## TimeDistributedLayer class TimeDistributedLayer(Layer): """ The :class:`TimeDistributedLayer` class that applies a function to every timestep of the input tensor. For example, if using :class:`DenseLayer` as the ``layer_class``, inputs [batch_size , length, dim] outputs [batch_size , length, new_dim]. Parameters ---------- layer : a :class:`Layer` instance The `Layer` class feeding into this layer, [batch_size , length, dim] layer_class : a :class:`Layer` class args : dictionary The arguments for the ``layer_class``. name : a string or None An optional name to attach to this layer. Examples -------- >>> batch_size = 32 >>> timestep = 20 >>> input_dim = 100 >>> x = tf.placeholder(dtype=tf.float32, shape=[batch_size, timestep, input_dim], name="encode_seqs") >>> net = InputLayer(x, name='input') >>> net = TimeDistributedLayer(net, layer_class=DenseLayer, args={'n_units':50, 'name':'dense'}, name='time_dense') ... [TL] InputLayer input: (32, 20, 100) ... [TL] TimeDistributedLayer time_dense: layer_class:DenseLayer >>> print(net.outputs._shape) ... (32, 20, 50) >>> net.print_params(False) ... param 0: (100, 50) time_dense/dense/W:0 ... param 1: (50,) time_dense/dense/b:0 ... num of params: 5050 """ def __init__( self, layer = None, layer_class = None, args = {}, name ='time_distributed', ): Layer.__init__(self, name=name) self.inputs = layer.outputs print(" [TL] TimeDistributedLayer %s: layer_class:%s args:%s" % (self.name, layer_class.__name__, args)) if not args: args = dict() assert isinstance(args, dict), "'args' must be a dict." 
if not isinstance(self.inputs, tf.Tensor): self.inputs = tf.transpose(tf.stack(self.inputs), [1, 0, 2]) input_shape = self.inputs.get_shape() timestep = input_shape[1] x = tf.unstack(self.inputs, axis=1) with ops.suppress_stdout(): for i in range(0, timestep): with tf.variable_scope(name, reuse=(False if i==0 else True)) as vs: set_name_reuse((False if i==0 else True)) net = layer_class(InputLayer(x[i], name=args['name']+str(i)), **args) # net = layer_class(InputLayer(x[i], name="input_"+args['name']), **args) x[i] = net.outputs variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name) self.outputs = tf.stack(x, axis=1, name=name) self.all_layers = list(layer.all_layers) self.all_params = list(layer.all_params) self.all_drop = dict(layer.all_drop) self.all_layers.extend( [self.outputs] ) self.all_params.extend( variables ) ## Recurrent layer class RNNLayer(Layer): """ The :class:`RNNLayer` class is a RNN layer, you can implement vanilla RNN, LSTM and GRU with it. Parameters ---------- layer : a :class:`Layer` instance The `Layer` class feeding into this layer. cell_fn : a TensorFlow's core RNN cell as follow (Note TF1.0+ and TF1.0- are different). - see `RNN Cells in TensorFlow <https://www.tensorflow.org/api_docs/python/>`_ cell_init_args : a dictionary The arguments for the cell initializer. n_hidden : a int The number of hidden units in the layer. initializer : initializer The initializer for initializing the parameters. n_steps : a int The sequence length. initial_state : None or RNN State If None, initial_state is zero_state. return_last : boolean - If True, return the last output, "Sequence input and single output" - If False, return all outputs, "Synced sequence input and output" - In other word, if you want to apply one or more RNN(s) on this layer, set to False. return_seq_2d : boolean - When return_last = False - If True, return 2D Tensor [n_example, n_hidden], for stacking DenseLayer after it. - If False, return 3D Tensor [n_example/n_steps, n_steps, n_hidden], for stacking multiple RNN after it. name : a string or None An optional name to attach to this layer. Variables -------------- outputs : a tensor The output of this RNN. return_last = False, outputs = all cell_output, which is the hidden state. cell_output.get_shape() = (?, n_hidden) final_state : a tensor or StateTuple When state_is_tuple = False, it is the final hidden and cell states, states.get_shape() = [?, 2 * n_hidden].\n When state_is_tuple = True, it stores two elements: (c, h), in that order. You can get the final state after each iteration during training, then feed it to the initial state of next iteration. initial_state : a tensor or StateTuple It is the initial state of this RNN layer, you can use it to initialize your state at the begining of each epoch or iteration according to your training procedure. batch_size : int or tensor Is int, if able to compute the batch_size, otherwise, tensor for ``?``. Examples -------- - For words >>> input_data = tf.placeholder(tf.int32, [batch_size, num_steps]) >>> network = tl.layers.EmbeddingInputlayer( ... inputs = input_data, ... vocabulary_size = vocab_size, ... embedding_size = hidden_size, ... E_init = tf.random_uniform_initializer(-init_scale, init_scale), ... name ='embedding_layer') >>> if is_training: >>> network = tl.layers.DropoutLayer(network, keep=keep_prob, name='drop1') >>> network = tl.layers.RNNLayer(network, ... cell_fn=tf.nn.rnn_cell.BasicLSTMCell, ... cell_init_args={'forget_bias': 0.0},# 'state_is_tuple': True}, ... 
n_hidden=hidden_size, ... initializer=tf.random_uniform_initializer(-init_scale, init_scale), ... n_steps=num_steps, ... return_last=False, ... name='basic_lstm_layer1') >>> lstm1 = network >>> if is_training: >>> network = tl.layers.DropoutLayer(network, keep=keep_prob, name='drop2') >>> network = tl.layers.RNNLayer(network, ... cell_fn=tf.nn.rnn_cell.BasicLSTMCell, ... cell_init_args={'forget_bias': 0.0}, # 'state_is_tuple': True}, ... n_hidden=hidden_size, ... initializer=tf.random_uniform_initializer(-init_scale, init_scale), ... n_steps=num_steps, ... return_last=False, ... return_seq_2d=True, ... name='basic_lstm_layer2') >>> lstm2 = network >>> if is_training: >>> network = tl.layers.DropoutLayer(network, keep=keep_prob, name='drop3') >>> network = tl.layers.DenseLayer(network, ... n_units=vocab_size, ... W_init=tf.random_uniform_initializer(-init_scale, init_scale), ... b_init=tf.random_uniform_initializer(-init_scale, init_scale), ... act = tl.activation.identity, name='output_layer') - For CNN+LSTM >>> x = tf.placeholder(tf.float32, shape=[batch_size, image_size, image_size, 1]) >>> network = tl.layers.InputLayer(x, name='input_layer') >>> network = tl.layers.Conv2dLayer(network, ... act = tf.nn.relu, ... shape = [5, 5, 1, 32], # 32 features for each 5x5 patch ... strides=[1, 2, 2, 1], ... padding='SAME', ... name ='cnn_layer1') >>> network = tl.layers.PoolLayer(network, ... ksize=[1, 2, 2, 1], ... strides=[1, 2, 2, 1], ... padding='SAME', ... pool = tf.nn.max_pool, ... name ='pool_layer1') >>> network = tl.layers.Conv2dLayer(network, ... act = tf.nn.relu, ... shape = [5, 5, 32, 10], # 10 features for each 5x5 patch ... strides=[1, 2, 2, 1], ... padding='SAME', ... name ='cnn_layer2') >>> network = tl.layers.PoolLayer(network, ... ksize=[1, 2, 2, 1], ... strides=[1, 2, 2, 1], ... padding='SAME', ... pool = tf.nn.max_pool, ... name ='pool_layer2') >>> network = tl.layers.FlattenLayer(network, name='flatten_layer') >>> network = tl.layers.ReshapeLayer(network, shape=[-1, num_steps, int(network.outputs._shape[-1])]) >>> rnn1 = tl.layers.RNNLayer(network, ... cell_fn=tf.nn.rnn_cell.LSTMCell, ... cell_init_args={}, ... n_hidden=200, ... initializer=tf.random_uniform_initializer(-0.1, 0.1), ... n_steps=num_steps, ... return_last=False, ... return_seq_2d=True, ... name='rnn_layer') >>> network = tl.layers.DenseLayer(rnn1, n_units=3, ... act = tl.activation.identity, name='output_layer') Notes ----- Input dimension should be rank 3 : [batch_size, n_steps, n_features], if no, please see :class:`ReshapeLayer`. 
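    A sketch of one way to carry the state across iterations, as described under
    ``final_state`` / ``initial_state`` above. It assumes the input placeholder has a
    fixed batch size (as in the word-level example above); ``sess``, ``train_op``,
    ``x`` and ``batches`` are assumed to exist in your training script, and ``rnn``
    is this layer:

    >>> state = sess.run(rnn.initial_state)
    >>> for batch in batches:
    >>>     _, state = sess.run([train_op, rnn.final_state],
    ...                  feed_dict={x: batch, rnn.initial_state: state})
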
References ---------- - `Neural Network RNN Cells in TensorFlow <https://www.tensorflow.org/api_docs/python/rnn_cell/>`_ - `tensorflow/python/ops/rnn.py <https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/ops/rnn.py>`_ - `tensorflow/python/ops/rnn_cell.py <https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/ops/rnn_cell.py>`_ - see TensorFlow tutorial ``ptb_word_lm.py``, TensorLayer tutorials ``tutorial_ptb_lstm*.py`` and ``tutorial_generate_text.py`` """ def __init__( self, layer = None, cell_fn = None,#tf.nn.rnn_cell.BasicRNNCell, cell_init_args = {}, n_hidden = 100, initializer = tf.random_uniform_initializer(-0.1, 0.1), n_steps = 5, initial_state = None, return_last = False, # is_reshape = True, return_seq_2d = False, name = 'rnn_layer', ): Layer.__init__(self, name=name) if cell_fn is None: raise Exception("Please put in cell_fn") if 'GRU' in cell_fn.__name__: try: cell_init_args.pop('state_is_tuple') except: pass self.inputs = layer.outputs print(" [TL] RNNLayer %s: n_hidden:%d n_steps:%d in_dim:%d in_shape:%s cell_fn:%s " % (self.name, n_hidden, n_steps, self.inputs.get_shape().ndims, self.inputs.get_shape(), cell_fn.__name__)) # You can get the dimension by .get_shape() or ._shape, and check the # dimension by .with_rank() as follow. # self.inputs.get_shape().with_rank(2) # self.inputs.get_shape().with_rank(3) # Input dimension should be rank 3 [batch_size, n_steps(max), n_features] try: self.inputs.get_shape().with_rank(3) except: raise Exception("RNN : Input dimension should be rank 3 : [batch_size, n_steps, n_features]") # is_reshape : boolean (deprecate) # Reshape the inputs to 3 dimension tensor.\n # If input is[batch_size, n_steps, n_features], we do not need to reshape it.\n # If input is [batch_size * n_steps, n_features], we need to reshape it. # if is_reshape: # self.inputs = tf.reshape(self.inputs, shape=[-1, n_steps, int(self.inputs._shape[-1])]) fixed_batch_size = self.inputs.get_shape().with_rank_at_least(1)[0] if fixed_batch_size.value: batch_size = fixed_batch_size.value print(" RNN batch_size (concurrent processes): %d" % batch_size) else: from tensorflow.python.ops import array_ops batch_size = array_ops.shape(self.inputs)[0] print(" non specified batch_size, uses a tensor instead.") self.batch_size = batch_size # Simplified version of tensorflow.models.rnn.rnn.py's rnn(). # This builds an unrolled LSTM for tutorial purposes only. # In general, use the rnn() or state_saving_rnn() from rnn.py. # # The alternative version of the code below is: # # from tensorflow.models.rnn import rnn # inputs = [tf.squeeze(input_, [1]) # for input_ in tf.split(1, num_steps, inputs)] # outputs, state = rnn.rnn(cell, inputs, initial_state=self._initial_state) outputs = [] if 'reuse' in inspect.getargspec(cell_fn.__init__).args: self.cell = cell = cell_fn(num_units=n_hidden, reuse=tf.get_variable_scope().reuse, **cell_init_args) else: self.cell = cell = cell_fn(num_units=n_hidden, **cell_init_args) if initial_state is None: self.initial_state = cell.zero_state(batch_size, dtype=tf.float32) # 1.2.3 state = self.initial_state # with tf.variable_scope("model", reuse=None, initializer=initializer): with tf.variable_scope(name, initializer=initializer) as vs: for time_step in range(n_steps): if time_step > 0: tf.get_variable_scope().reuse_variables() (cell_output, state) = cell(self.inputs[:, time_step, :], state) outputs.append(cell_output) # Retrieve just the RNN variables. 
# rnn_variables = [v for v in tf.all_variables() if v.name.startswith(vs.name)] rnn_variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name) print(" n_params : %d" % (len(rnn_variables))) if return_last: # 2D Tensor [batch_size, n_hidden] self.outputs = outputs[-1] else: if return_seq_2d: # PTB tutorial: stack dense layer after that, or compute the cost from the output # 2D Tensor [n_example, n_hidden] try: # TF1.0 self.outputs = tf.reshape(tf.concat(outputs, 1), [-1, n_hidden]) except: # TF0.12 self.outputs = tf.reshape(tf.concat(1, outputs), [-1, n_hidden]) else: # <akara>: stack more RNN layer after that # 3D Tensor [n_example/n_steps, n_steps, n_hidden] try: # TF1.0 self.outputs = tf.reshape(tf.concat(outputs, 1), [-1, n_steps, n_hidden]) except: # TF0.12 self.outputs = tf.reshape(tf.concat(1, outputs), [-1, n_steps, n_hidden]) self.final_state = state self.all_layers = list(layer.all_layers) self.all_params = list(layer.all_params) self.all_drop = dict(layer.all_drop) # print(type(self.outputs)) self.all_layers.extend( [self.outputs] ) self.all_params.extend( rnn_variables ) class BiRNNLayer(Layer): """ The :class:`BiRNNLayer` class is a Bidirectional RNN layer. Parameters ---------- layer : a :class:`Layer` instance The `Layer` class feeding into this layer. cell_fn : a TensorFlow's core RNN cell as follow (Note TF1.0+ and TF1.0- are different). - see `RNN Cells in TensorFlow <https://www.tensorflow.org/api_docs/python/>`_ cell_init_args : a dictionary The arguments for the cell initializer. n_hidden : a int The number of hidden units in the layer. initializer : initializer The initializer for initializing the parameters. n_steps : a int The sequence length. fw_initial_state : None or forward RNN State If None, initial_state is zero_state. bw_initial_state : None or backward RNN State If None, initial_state is zero_state. dropout : `tuple` of `float`: (input_keep_prob, output_keep_prob). The input and output keep probability. n_layer : a int, default is 1. The number of RNN layers. return_last : boolean - If True, return the last output, "Sequence input and single output" - If False, return all outputs, "Synced sequence input and output" - In other word, if you want to apply one or more RNN(s) on this layer, set to False. return_seq_2d : boolean - When return_last = False - If True, return 2D Tensor [n_example, n_hidden], for stacking DenseLayer after it. - If False, return 3D Tensor [n_example/n_steps, n_steps, n_hidden], for stacking multiple RNN after it. name : a string or None An optional name to attach to this layer. Variables -------------- outputs : a tensor The output of this RNN. return_last = False, outputs = all cell_output, which is the hidden state. cell_output.get_shape() = (?, n_hidden) fw(bw)_final_state : a tensor or StateTuple When state_is_tuple = False, it is the final hidden and cell states, states.get_shape() = [?, 2 * n_hidden].\n When state_is_tuple = True, it stores two elements: (c, h), in that order. You can get the final state after each iteration during training, then feed it to the initial state of next iteration. fw(bw)_initial_state : a tensor or StateTuple It is the initial state of this RNN layer, you can use it to initialize your state at the begining of each epoch or iteration according to your training procedure. batch_size : int or tensor Is int, if able to compute the batch_size, otherwise, tensor for ``?``. Notes ----- - Input dimension should be rank 3 : [batch_size, n_steps, n_features], if no, please see :class:`ReshapeLayer`. 
- For predicting, the sequence length has to be the same with the sequence length of training, while, for normal RNN, we can use sequence length of 1 for predicting. References ---------- - `Source <https://github.com/akaraspt/deepsleep/blob/master/deepsleep/model.py>`_ """ def __init__( self, layer = None, cell_fn = None, #tf.nn.rnn_cell.LSTMCell, cell_init_args = {'use_peepholes':True, 'state_is_tuple':True}, n_hidden = 100, initializer = tf.random_uniform_initializer(-0.1, 0.1), n_steps = 5, fw_initial_state = None, bw_initial_state = None, dropout = None, n_layer = 1, return_last = False, return_seq_2d = False, name = 'birnn_layer', ): Layer.__init__(self, name=name) if cell_fn is None: raise Exception("Please put in cell_fn") if 'GRU' in cell_fn.__name__: try: cell_init_args.pop('state_is_tuple') except: pass self.inputs = layer.outputs print(" [TL] BiRNNLayer %s: n_hidden:%d n_steps:%d in_dim:%d in_shape:%s cell_fn:%s dropout:%s n_layer:%d " % (self.name, n_hidden, n_steps, self.inputs.get_shape().ndims, self.inputs.get_shape(), cell_fn.__name__, dropout, n_layer)) fixed_batch_size = self.inputs.get_shape().with_rank_at_least(1)[0] if fixed_batch_size.value: self.batch_size = fixed_batch_size.value print(" RNN batch_size (concurrent processes): %d" % self.batch_size) else: from tensorflow.python.ops import array_ops self.batch_size = array_ops.shape(self.inputs)[0] print(" non specified batch_size, uses a tensor instead.") # Input dimension should be rank 3 [batch_size, n_steps(max), n_features] try: self.inputs.get_shape().with_rank(3) except: raise Exception("RNN : Input dimension should be rank 3 : [batch_size, n_steps, n_features]") with tf.variable_scope(name, initializer=initializer) as vs: rnn_creator = lambda: cell_fn(num_units=n_hidden, **cell_init_args) # Apply dropout if dropout: if type(dropout) in [tuple, list]: in_keep_prob = dropout[0] out_keep_prob = dropout[1] elif isinstance(dropout, float): in_keep_prob, out_keep_prob = dropout, dropout else: raise Exception("Invalid dropout type (must be a 2-D tuple of " "float)") try: # TF 1.0 DropoutWrapper_fn = tf.contrib.rnn.DropoutWrapper except: DropoutWrapper_fn = tf.nn.rnn_cell.DropoutWrapper cell_creator = lambda: DropoutWrapper_fn(rnn_creator(), input_keep_prob=in_keep_prob, output_keep_prob=1.0) # out_keep_prob) else: cell_creator = rnn_creator self.fw_cell = cell_creator() self.bw_cell = cell_creator() # Apply multiple layers if n_layer > 1: try: # TF1.0 MultiRNNCell_fn = tf.contrib.rnn.MultiRNNCell except: MultiRNNCell_fn = tf.nn.rnn_cell.MultiRNNCell try: self.fw_cell = MultiRNNCell_fn([cell_creator() for _ in range(n_layer)], state_is_tuple=True) self.bw_cell = MultiRNNCell_fn([cell_creator() for _ in range(n_layer)], state_is_tuple=True) except: self.fw_cell = MultiRNNCell_fn([cell_creator() for _ in range(n_layer)]) self.bw_cell = MultiRNNCell_fn([cell_creator() for _ in range(n_layer)]) # Initial state of RNN if fw_initial_state is None: self.fw_initial_state = self.fw_cell.zero_state(self.batch_size, dtype=tf.float32) else: self.fw_initial_state = fw_initial_state if bw_initial_state is None: self.bw_initial_state = self.bw_cell.zero_state(self.batch_size, dtype=tf.float32) else: self.bw_initial_state = bw_initial_state # exit() # Feedforward to MultiRNNCell try: ## TF1.0 list_rnn_inputs = tf.unstack(self.inputs, axis=1) except: ## TF0.12 list_rnn_inputs = tf.unpack(self.inputs, axis=1) try: # TF1.0 bidirectional_rnn_fn = tf.contrib.rnn.static_bidirectional_rnn except: bidirectional_rnn_fn = 
tf.nn.bidirectional_rnn outputs, fw_state, bw_state = bidirectional_rnn_fn( # outputs, fw_state, bw_state = tf.contrib.rnn.static_bidirectional_rnn( cell_fw=self.fw_cell, cell_bw=self.bw_cell, inputs=list_rnn_inputs, initial_state_fw=self.fw_initial_state, initial_state_bw=self.bw_initial_state ) if return_last: self.outputs = outputs[-1] else: self.outputs = outputs if return_seq_2d: # 2D Tensor [n_example, n_hidden] try: # TF1.0 self.outputs = tf.reshape(tf.concat(outputs, 1), [-1, n_hidden*2]) except: # TF0.12 self.outputs = tf.reshape(tf.concat(1, outputs), [-1, n_hidden*2]) else: # <akara>: stack more RNN layer after that # 3D Tensor [n_example/n_steps, n_steps, n_hidden] try: # TF1.0 self.outputs = tf.reshape(tf.concat(outputs,1), [-1, n_steps, n_hidden*2]) except: # TF0.12 self.outputs = tf.reshape(tf.concat(1, outputs), [-1, n_steps, n_hidden*2]) self.fw_final_state = fw_state self.bw_final_state = bw_state # Retrieve just the RNN variables. rnn_variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name) print(" n_params : %d" % (len(rnn_variables))) self.all_layers = list(layer.all_layers) self.all_params = list(layer.all_params) self.all_drop = dict(layer.all_drop) self.all_layers.extend( [self.outputs] ) self.all_params.extend( rnn_variables ) # Advanced Ops for Dynamic RNN def advanced_indexing_op(input, index): """Advanced Indexing for Sequences, returns the outputs by given sequence lengths. When return the last output :class:`DynamicRNNLayer` uses it to get the last outputs with the sequence lengths. Parameters ----------- input : tensor for data [batch_size, n_step(max), n_features] index : tensor for indexing, i.e. sequence_length in Dynamic RNN. [batch_size] Examples --------- >>> batch_size, max_length, n_features = 3, 5, 2 >>> z = np.random.uniform(low=-1, high=1, size=[batch_size, max_length, n_features]).astype(np.float32) >>> b_z = tf.constant(z) >>> sl = tf.placeholder(dtype=tf.int32, shape=[batch_size]) >>> o = advanced_indexing_op(b_z, sl) >>> >>> sess = tf.InteractiveSession() >>> tl.layers.initialize_global_variables(sess) >>> >>> order = np.asarray([1,1,2]) >>> print("real",z[0][order[0]-1], z[1][order[1]-1], z[2][order[2]-1]) >>> y = sess.run([o], feed_dict={sl:order}) >>> print("given",order) >>> print("out", y) ... real [-0.93021595 0.53820813] [-0.92548317 -0.77135968] [ 0.89952248 0.19149846] ... given [1 1 2] ... out [array([[-0.93021595, 0.53820813], ... [-0.92548317, -0.77135968], ... [ 0.89952248, 0.19149846]], dtype=float32)] References ----------- - Modified from TFlearn (the original code is used for fixed length rnn), `references <https://github.com/tflearn/tflearn/blob/master/tflearn/layers/recurrent.py>`_. """ batch_size = tf.shape(input)[0] # max_length = int(input.get_shape()[1]) # for fixed length rnn, length is given max_length = tf.shape(input)[1] # for dynamic_rnn, length is unknown dim_size = int(input.get_shape()[2]) index = tf.range(0, batch_size) * max_length + (index - 1) flat = tf.reshape(input, [-1, dim_size]) relevant = tf.gather(flat, index) return relevant def retrieve_seq_length_op(data): """An op to compute the length of a sequence from input shape of [batch_size, n_step(max), n_features], it can be used when the features of padding (on right hand side) are all zeros. Parameters ----------- data : tensor [batch_size, n_step(max), n_features] with zero padding on right hand side. Examples --------- >>> data = [[[1],[2],[0],[0],[0]], ... [[1],[2],[3],[0],[0]], ... 
[[1],[2],[6],[1],[0]]] >>> data = np.asarray(data) >>> print(data.shape) ... (3, 5, 1) >>> data = tf.constant(data) >>> sl = retrieve_seq_length_op(data) >>> sess = tf.InteractiveSession() >>> tl.layers.initialize_global_variables(sess) >>> y = sl.eval() ... [2 3 4] - Multiple features >>> data = [[[1,2],[2,2],[1,2],[1,2],[0,0]], ... [[2,3],[2,4],[3,2],[0,0],[0,0]], ... [[3,3],[2,2],[5,3],[1,2],[0,0]]] >>> sl ... [4 3 4] References ------------ - Borrow from `TFlearn <https://github.com/tflearn/tflearn/blob/master/tflearn/layers/recurrent.py>`_. """ with tf.name_scope('GetLength'): ## TF 1.0 change reduction_indices to axis used = tf.sign(tf.reduce_max(tf.abs(data), 2)) length = tf.reduce_sum(used, 1) ## TF < 1.0 # used = tf.sign(tf.reduce_max(tf.abs(data), reduction_indices=2)) # length = tf.reduce_sum(used, reduction_indices=1) length = tf.cast(length, tf.int32) return length def retrieve_seq_length_op2(data): """An op to compute the length of a sequence, from input shape of [batch_size, n_step(max)], it can be used when the features of padding (on right hand side) are all zeros. Parameters ----------- data : tensor [batch_size, n_step(max)] with zero padding on right hand side. Examples -------- >>> data = [[1,2,0,0,0], ... [1,2,3,0,0], ... [1,2,6,1,0]] >>> o = retrieve_seq_length_op2(data) >>> sess = tf.InteractiveSession() >>> tl.layers.initialize_global_variables(sess) >>> print(o.eval()) ... [2 3 4] """ return tf.reduce_sum(tf.cast(tf.greater(data, tf.zeros_like(data)), tf.int32), 1) def retrieve_seq_length_op3(data, pad_val=0): # HangSheng: return tensor for sequence length, if input is tf.string data_shape_size = data.get_shape().ndims if data_shape_size == 3: return tf.reduce_sum(tf.cast(tf.reduce_any(tf.not_equal(data, pad_val), axis=2), dtype=tf.int32), 1) elif data_shape_size == 2: return tf.reduce_sum(tf.cast(tf.not_equal(data, pad_val), dtype=tf.int32), 1) elif data_shape_size == 1: raise ValueError("retrieve_seq_length_op3: data has wrong shape!") else: raise ValueError("retrieve_seq_length_op3: handling data_shape_size %s hasn't been implemented!" % (data_shape_size)) def target_mask_op(data, pad_val=0): # HangSheng: return tensor for mask,if input is tf.string data_shape_size = data.get_shape().ndims if data_shape_size == 3: return tf.cast(tf.reduce_any(tf.not_equal(data, pad_val), axis=2), dtype=tf.int32) elif data_shape_size == 2: return tf.cast(tf.not_equal(data, pad_val), dtype=tf.int32) elif data_shape_size == 1: raise ValueError("target_mask_op: data has wrong shape!") else: raise ValueError("target_mask_op: handling data_shape_size %s hasn't been implemented!" % (data_shape_size)) # Dynamic RNN class DynamicRNNLayer(Layer): """ The :class:`DynamicRNNLayer` class is a Dynamic RNN layer, see ``tf.nn.dynamic_rnn``. Parameters ---------- layer : a :class:`Layer` instance The `Layer` class feeding into this layer. cell_fn : a TensorFlow's core RNN cell as follow (Note TF1.0+ and TF1.0- are different). - see `RNN Cells in TensorFlow <https://www.tensorflow.org/api_docs/python/>`_ cell_init_args : a dictionary The arguments for the cell initializer. n_hidden : a int The number of hidden units in the layer. initializer : initializer The initializer for initializing the parameters. sequence_length : a tensor, array or None. The sequence length of each row of input data, see ``Advanced Ops for Dynamic RNN``. - If None, it uses ``retrieve_seq_length_op`` to compute the sequence_length, i.e. when the features of padding (on right hand side) are all zeros. 
- If using word embedding, you may need to compute the sequence_length from the ID array (the integer features before word embedding) by using ``retrieve_seq_length_op2`` or ``retrieve_seq_length_op``. - You can also input an numpy array. - More details about TensorFlow dynamic_rnn in `Wild-ML Blog <http://www.wildml.com/2016/08/rnns-in-tensorflow-a-practical-guide-and-undocumented-features/>`_. initial_state : None or RNN State If None, initial_state is zero_state. dropout : `tuple` of `float`: (input_keep_prob, output_keep_prob). The input and output keep probability. n_layer : a int, default is 1. The number of RNN layers. return_last : boolean - If True, return the last output, "Sequence input and single output" - If False, return all outputs, "Synced sequence input and output" - In other word, if you want to apply one or more RNN(s) on this layer, set to False. return_seq_2d : boolean - When return_last = False - If True, return 2D Tensor [n_example, n_hidden], for stacking DenseLayer or computing cost after it. - If False, return 3D Tensor [n_example/n_steps(max), n_steps(max), n_hidden], for stacking multiple RNN after it. name : a string or None An optional name to attach to this layer. Variables ------------ outputs : a tensor The output of this RNN. return_last = False, outputs = all cell_output, which is the hidden state. cell_output.get_shape() = (?, n_hidden) final_state : a tensor or StateTuple When state_is_tuple = False, it is the final hidden and cell states, states.get_shape() = [?, 2 * n_hidden].\n When state_is_tuple = True, it stores two elements: (c, h), in that order. You can get the final state after each iteration during training, then feed it to the initial state of next iteration. initial_state : a tensor or StateTuple It is the initial state of this RNN layer, you can use it to initialize your state at the begining of each epoch or iteration according to your training procedure. sequence_length : a tensor or array, shape = [batch_size] The sequence lengths computed by Advanced Opt or the given sequence lengths. Notes ----- Input dimension should be rank 3 : [batch_size, n_steps(max), n_features], if no, please see :class:`ReshapeLayer`. Examples -------- >>> input_seqs = tf.placeholder(dtype=tf.int64, shape=[batch_size, None], name="input_seqs") >>> network = tl.layers.EmbeddingInputlayer( ... inputs = input_seqs, ... vocabulary_size = vocab_size, ... embedding_size = embedding_size, ... name = 'seq_embedding') >>> network = tl.layers.DynamicRNNLayer(network, ... cell_fn = tf.contrib.rnn.BasicLSTMCell, # for TF0.2 tf.nn.rnn_cell.BasicLSTMCell, ... n_hidden = embedding_size, ... dropout = 0.7, ... sequence_length = tl.layers.retrieve_seq_length_op2(input_seqs), ... return_seq_2d = True, # stack denselayer or compute cost after it ... name = 'dynamic_rnn') ... network = tl.layers.DenseLayer(network, n_units=vocab_size, ... 
act=tf.identity, name="output") References ---------- - `Wild-ML Blog <http://www.wildml.com/2016/08/rnns-in-tensorflow-a-practical-guide-and-undocumented-features/>`_ - `dynamic_rnn.ipynb <https://github.com/dennybritz/tf-rnn/blob/master/dynamic_rnn.ipynb>`_ - `tf.nn.dynamic_rnn <https://github.com/tensorflow/tensorflow/blob/master/tensorflow/g3doc/api_docs/python/functions_and_classes/shard8/tf.nn.dynamic_rnn.md>`_ - `tflearn rnn <https://github.com/tflearn/tflearn/blob/master/tflearn/layers/recurrent.py>`_ - ``tutorial_dynamic_rnn.py`` """ def __init__( self, layer = None, cell_fn = None,#tf.nn.rnn_cell.LSTMCell, cell_init_args = {'state_is_tuple' : True}, n_hidden = 256, initializer = tf.random_uniform_initializer(-0.1, 0.1), sequence_length = None, initial_state = None, dropout = None, n_layer = 1, return_last = False, return_seq_2d = False, dynamic_rnn_init_args={}, name = 'dyrnn_layer', ): Layer.__init__(self, name=name) if cell_fn is None: raise Exception("Please put in cell_fn") if 'GRU' in cell_fn.__name__: try: cell_init_args.pop('state_is_tuple') except: pass self.inputs = layer.outputs print(" [TL] DynamicRNNLayer %s: n_hidden:%d, in_dim:%d in_shape:%s cell_fn:%s dropout:%s n_layer:%d" % (self.name, n_hidden, self.inputs.get_shape().ndims, self.inputs.get_shape(), cell_fn.__name__, dropout, n_layer)) # Input dimension should be rank 3 [batch_size, n_steps(max), n_features] try: self.inputs.get_shape().with_rank(3) except: raise Exception("RNN : Input dimension should be rank 3 : [batch_size, n_steps(max), n_features]") # Get the batch_size fixed_batch_size = self.inputs.get_shape().with_rank_at_least(1)[0] if fixed_batch_size.value: batch_size = fixed_batch_size.value print(" batch_size (concurrent processes): %d" % batch_size) else: from tensorflow.python.ops import array_ops batch_size = array_ops.shape(self.inputs)[0] print(" non specified batch_size, uses a tensor instead.") self.batch_size = batch_size # Creats the cell function # cell_instance_fn=lambda: cell_fn(num_units=n_hidden, **cell_init_args) # HanSheng rnn_creator = lambda: cell_fn(num_units=n_hidden, **cell_init_args) # Apply dropout if dropout: if type(dropout) in [tuple, list]: in_keep_prob = dropout[0] out_keep_prob = dropout[1] elif isinstance(dropout, float): in_keep_prob, out_keep_prob = dropout, dropout else: raise Exception("Invalid dropout type (must be a 2-D tuple of " "float)") try: # TF1.0 DropoutWrapper_fn = tf.contrib.rnn.DropoutWrapper except: DropoutWrapper_fn = tf.nn.rnn_cell.DropoutWrapper # cell_instance_fn1=cell_instance_fn # HanSheng # cell_instance_fn=DropoutWrapper_fn( # cell_instance_fn1(), # input_keep_prob=in_keep_prob, # output_keep_prob=out_keep_prob) cell_creator = lambda: DropoutWrapper_fn(rnn_creator(), input_keep_prob=in_keep_prob, output_keep_prob=1.0)#out_keep_prob) else: cell_creator = rnn_creator self.cell = cell_creator() # Apply multiple layers if n_layer > 1: try: MultiRNNCell_fn = tf.contrib.rnn.MultiRNNCell except: MultiRNNCell_fn = tf.nn.rnn_cell.MultiRNNCell # cell_instance_fn2=cell_instance_fn # HanSheng try: # cell_instance_fn=lambda: MultiRNNCell_fn([cell_instance_fn2() for _ in range(n_layer)], state_is_tuple=True) # HanSheng self.cell = MultiRNNCell_fn([cell_creator() for _ in range(n_layer)], state_is_tuple=True) except: # when GRU # cell_instance_fn=lambda: MultiRNNCell_fn([cell_instance_fn2() for _ in range(n_layer)]) # HanSheng self.cell = MultiRNNCell_fn([cell_creator() for _ in range(n_layer)]) if dropout: self.cell = DropoutWrapper_fn(self.cell, 
input_keep_prob=1.0, output_keep_prob=out_keep_prob) # self.cell=cell_instance_fn() # HanSheng # Initialize initial_state if initial_state is None: self.initial_state = self.cell.zero_state(batch_size, dtype=tf.float32) else: self.initial_state = initial_state # Computes sequence_length if sequence_length is None: try: ## TF1.0 sequence_length = retrieve_seq_length_op( self.inputs if isinstance(self.inputs, tf.Tensor) else tf.stack(self.inputs)) except: ## TF0.12 sequence_length = retrieve_seq_length_op( self.inputs if isinstance(self.inputs, tf.Tensor) else tf.pack(self.inputs)) # Main - Computes outputs and last_states with tf.variable_scope(name, initializer=initializer) as vs: outputs, last_states = tf.nn.dynamic_rnn( cell=self.cell, # inputs=X inputs = self.inputs, # dtype=tf.float64, sequence_length=sequence_length, initial_state = self.initial_state, **dynamic_rnn_init_args ) rnn_variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name) # print(" n_params : %d" % (len(rnn_variables))) # Manage the outputs if return_last: # [batch_size, n_hidden] # outputs = tf.transpose(tf.pack(outputs), [1, 0, 2]) # TF1.0 tf.pack --> tf.stack self.outputs = advanced_indexing_op(outputs, sequence_length) else: # [batch_size, n_step(max), n_hidden] # self.outputs = result[0]["outputs"] # self.outputs = outputs # it is 3d, but it is a list if return_seq_2d: # PTB tutorial: # 2D Tensor [n_example, n_hidden] try: # TF1.0 self.outputs = tf.reshape(tf.concat(outputs, 1), [-1, n_hidden]) except: # TF0.12 self.outputs = tf.reshape(tf.concat(1, outputs), [-1, n_hidden]) else: # <akara>: # 3D Tensor [batch_size, n_steps(max), n_hidden] max_length = tf.shape(outputs)[1] batch_size = tf.shape(outputs)[0] try: # TF1.0 self.outputs = tf.reshape(tf.concat(outputs, 1), [batch_size, max_length, n_hidden]) except: # TF0.12 self.outputs = tf.reshape(tf.concat(1, outputs), [batch_size, max_length, n_hidden]) # self.outputs = tf.reshape(tf.concat(1, outputs), [-1, max_length, n_hidden]) # Final state self.final_state = last_states self.sequence_length = sequence_length self.all_layers = list(layer.all_layers) self.all_params = list(layer.all_params) self.all_drop = dict(layer.all_drop) self.all_layers.extend( [self.outputs] ) self.all_params.extend( rnn_variables ) # Bidirectional Dynamic RNN class BiDynamicRNNLayer(Layer): """ The :class:`BiDynamicRNNLayer` class is a RNN layer, you can implement vanilla RNN, LSTM and GRU with it. Parameters ---------- layer : a :class:`Layer` instance The `Layer` class feeding into this layer. cell_fn : a TensorFlow's core RNN cell as follow (Note TF1.0+ and TF1.0- are different). - see `RNN Cells in TensorFlow <https://www.tensorflow.org/api_docs/python/>`_ cell_init_args : a dictionary The arguments for the cell initializer. n_hidden : a int The number of hidden units in the layer. initializer : initializer The initializer for initializing the parameters. sequence_length : a tensor, array or None The sequence length of each row of input data, see ``Advanced Ops for Dynamic RNN``. - If None, it uses ``retrieve_seq_length_op`` to compute the sequence_length, i.e. when the features of padding (on right hand side) are all zeros. - If using word embedding, you may need to compute the sequence_length from the ID array (the integer features before word embedding) by using ``retrieve_seq_length_op2`` or ``retrieve_seq_length_op``. - You can also input an numpy array. 
- More details about TensorFlow dynamic_rnn in `Wild-ML Blog <http://www.wildml.com/2016/08/rnns-in-tensorflow-a-practical-guide-and-undocumented-features/>`_. fw_initial_state : None or forward RNN State If None, initial_state is zero_state. bw_initial_state : None or backward RNN State If None, initial_state is zero_state. dropout : `tuple` of `float`: (input_keep_prob, output_keep_prob). The input and output keep probability. n_layer : a int, default is 1. The number of RNN layers. return_last : boolean If True, return the last output, "Sequence input and single output"\n If False, return all outputs, "Synced sequence input and output"\n In other word, if you want to apply one or more RNN(s) on this layer, set to False. return_seq_2d : boolean - When return_last = False - If True, return 2D Tensor [n_example, 2 * n_hidden], for stacking DenseLayer or computing cost after it. - If False, return 3D Tensor [n_example/n_steps(max), n_steps(max), 2 * n_hidden], for stacking multiple RNN after it. name : a string or None An optional name to attach to this layer. Variables ----------------------- outputs : a tensor The output of this RNN. return_last = False, outputs = all cell_output, which is the hidden state. cell_output.get_shape() = (?, 2 * n_hidden) fw(bw)_final_state : a tensor or StateTuple When state_is_tuple = False, it is the final hidden and cell states, states.get_shape() = [?, 2 * n_hidden].\n When state_is_tuple = True, it stores two elements: (c, h), in that order. You can get the final state after each iteration during training, then feed it to the initial state of next iteration. fw(bw)_initial_state : a tensor or StateTuple It is the initial state of this RNN layer, you can use it to initialize your state at the begining of each epoch or iteration according to your training procedure. sequence_length : a tensor or array, shape = [batch_size] The sequence lengths computed by Advanced Opt or the given sequence lengths. Notes ----- Input dimension should be rank 3 : [batch_size, n_steps(max), n_features], if no, please see :class:`ReshapeLayer`. 
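Examples
--------
- A minimal sketch, assuming the same ``input_seqs`` placeholder and embedding setup as the :class:`DynamicRNNLayer` example above; ``batch_size``, ``vocab_size`` and ``embedding_size`` are placeholder names, not values fixed by this layer.
>>> network = tl.layers.EmbeddingInputlayer(
...     inputs = input_seqs,
...     vocabulary_size = vocab_size,
...     embedding_size = embedding_size,
...     name = 'seq_embedding')
>>> network = tl.layers.BiDynamicRNNLayer(network,
...     cell_fn = tf.contrib.rnn.BasicLSTMCell,
...     n_hidden = embedding_size,
...     dropout = 0.7,
...     sequence_length = tl.layers.retrieve_seq_length_op2(input_seqs),
...     return_seq_2d = True,   # 2D Tensor [n_example, 2 * n_hidden]
...     name = 'bi_dynamic_rnn')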
References ---------- - `Wild-ML Blog <http://www.wildml.com/2016/08/rnns-in-tensorflow-a-practical-guide-and-undocumented-features/>`_ - `bidirectional_rnn.ipynb <https://github.com/dennybritz/tf-rnn/blob/master/bidirectional_rnn.ipynb>`_ """ def __init__( self, layer = None, cell_fn = None,#tf.nn.rnn_cell.LSTMCell, cell_init_args = {'state_is_tuple':True}, n_hidden = 256, initializer = tf.random_uniform_initializer(-0.1, 0.1), sequence_length = None, fw_initial_state = None, bw_initial_state = None, dropout = None, n_layer = 1, return_last = False, return_seq_2d = False, dynamic_rnn_init_args={}, name = 'bi_dyrnn_layer', ): Layer.__init__(self, name=name) if cell_fn is None: raise Exception("Please put in cell_fn") if 'GRU' in cell_fn.__name__: try: cell_init_args.pop('state_is_tuple') except: pass self.inputs = layer.outputs print(" [TL] BiDynamicRNNLayer %s: n_hidden:%d in_dim:%d in_shape:%s cell_fn:%s dropout:%s n_layer:%d" % (self.name, n_hidden, self.inputs.get_shape().ndims, self.inputs.get_shape(), cell_fn.__name__, dropout, n_layer)) # Input dimension should be rank 3 [batch_size, n_steps(max), n_features] try: self.inputs.get_shape().with_rank(3) except: raise Exception("RNN : Input dimension should be rank 3 : [batch_size, n_steps(max), n_features]") # Get the batch_size fixed_batch_size = self.inputs.get_shape().with_rank_at_least(1)[0] if fixed_batch_size.value: batch_size = fixed_batch_size.value print(" batch_size (concurrent processes): %d" % batch_size) else: from tensorflow.python.ops import array_ops batch_size = array_ops.shape(self.inputs)[0] print(" non specified batch_size, uses a tensor instead.") self.batch_size = batch_size with tf.variable_scope(name, initializer=initializer) as vs: # Creats the cell function # cell_instance_fn=lambda: cell_fn(num_units=n_hidden, **cell_init_args) # HanSheng rnn_creator = lambda: cell_fn(num_units=n_hidden, **cell_init_args) # Apply dropout if dropout: if type(dropout) in [tuple, list]: in_keep_prob = dropout[0] out_keep_prob = dropout[1] elif isinstance(dropout, float): in_keep_prob, out_keep_prob = dropout, dropout else: raise Exception("Invalid dropout type (must be a 2-D tuple of " "float)") try: DropoutWrapper_fn = tf.contrib.rnn.DropoutWrapper except: DropoutWrapper_fn = tf.nn.rnn_cell.DropoutWrapper # cell_instance_fn1=cell_instance_fn # HanSheng # cell_instance_fn=lambda: DropoutWrapper_fn( # cell_instance_fn1(), # input_keep_prob=in_keep_prob, # output_keep_prob=out_keep_prob) cell_creator = lambda: DropoutWrapper_fn(rnn_creator(), input_keep_prob=in_keep_prob, output_keep_prob=1.0) # out_keep_prob) else: cell_creator = rnn_creator self.fw_cell = cell_creator() self.bw_cell = cell_creator() # Apply multiple layers if n_layer > 1: try: MultiRNNCell_fn = tf.contrib.rnn.MultiRNNCell except: MultiRNNCell_fn = tf.nn.rnn_cell.MultiRNNCell # cell_instance_fn2=cell_instance_fn # HanSheng # cell_instance_fn=lambda: MultiRNNCell_fn([cell_instance_fn2() for _ in range(n_layer)]) self.fw_cell = MultiRNNCell_fn([cell_creator() for _ in range(n_layer)]) self.bw_cell = MultiRNNCell_fn([cell_creator() for _ in range(n_layer)]) # self.fw_cell=cell_instance_fn() # self.bw_cell=cell_instance_fn() # Initial state of RNN if fw_initial_state is None: self.fw_initial_state = self.fw_cell.zero_state(self.batch_size, dtype=tf.float32) else: self.fw_initial_state = fw_initial_state if bw_initial_state is None: self.bw_initial_state = self.bw_cell.zero_state(self.batch_size, dtype=tf.float32) else: self.bw_initial_state = bw_initial_state # 
Computes sequence_length if sequence_length is None: try: ## TF1.0 sequence_length = retrieve_seq_length_op( self.inputs if isinstance(self.inputs, tf.Tensor) else tf.stack(self.inputs)) except: ## TF0.12 sequence_length = retrieve_seq_length_op( self.inputs if isinstance(self.inputs, tf.Tensor) else tf.pack(self.inputs)) outputs, (states_fw, states_bw) = tf.nn.bidirectional_dynamic_rnn( cell_fw=self.fw_cell, cell_bw=self.bw_cell, inputs=self.inputs, sequence_length=sequence_length, initial_state_fw=self.fw_initial_state, initial_state_bw=self.bw_initial_state, **dynamic_rnn_init_args ) rnn_variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name) print(" n_params : %d" % (len(rnn_variables))) # Manage the outputs try: # TF1.0 outputs = tf.concat(outputs, 2) except: # TF0.12 outputs = tf.concat(2, outputs) if return_last: # [batch_size, 2 * n_hidden] self.outputs = advanced_indexing_op(outputs, sequence_length) else: # [batch_size, n_step(max), 2 * n_hidden] if return_seq_2d: # PTB tutorial: # 2D Tensor [n_example, 2 * n_hidden] try: # TF1.0 self.outputs = tf.reshape(tf.concat(outputs, 1), [-1, 2 * n_hidden]) except: # TF0.12 self.outputs = tf.reshape(tf.concat(1, outputs), [-1, 2 * n_hidden]) else: # <akara>: # 3D Tensor [batch_size, n_steps(max), 2 * n_hidden] max_length = tf.shape(outputs)[1] batch_size = tf.shape(outputs)[0] try: # TF1.0 self.outputs = tf.reshape(tf.concat(outputs, 1), [batch_size, max_length, 2 * n_hidden]) except: # TF0.12 self.outputs = tf.reshape(tf.concat(1, outputs), [batch_size, max_length, 2 * n_hidden]) # self.outputs = tf.reshape(tf.concat(1, outputs), [-1, max_length, 2 * n_hidden]) # Final state self.fw_final_states = states_fw self.bw_final_states = states_bw self.sequence_length = sequence_length self.all_layers = list(layer.all_layers) self.all_params = list(layer.all_params) self.all_drop = dict(layer.all_drop) self.all_layers.extend( [self.outputs] ) self.all_params.extend( rnn_variables ) # Seq2seq class Seq2Seq(Layer): """ The :class:`Seq2Seq` class is a simple :class:`DynamicRNNLayer` based Seq2seq layer, both encoder and decoder are :class:`DynamicRNNLayer`, network details see `Model <https://camo.githubusercontent.com/242210d7d0151cae91107ee63bff364a860db5dd/687474703a2f2f6936342e74696e797069632e636f6d2f333031333674652e706e67>`_ and `Sequence to Sequence Learning with Neural Networks <https://arxiv.org/abs/1409.3215>`_ . Parameters ---------- net_encode_in : a :class:`Layer` instance Encode sequences, [batch_size, None, n_features]. net_decode_in : a :class:`Layer` instance Decode sequences, [batch_size, None, n_features]. cell_fn : a TensorFlow's core RNN cell as follow (Note TF1.0+ and TF1.0- are different). - see `RNN Cells in TensorFlow <https://www.tensorflow.org/api_docs/python/>`_ cell_init_args : a dictionary The arguments for the cell initializer. n_hidden : a int The number of hidden units in the layer. initializer : initializer The initializer for initializing the parameters. encode_sequence_length : tensor for encoder sequence length, see :class:`DynamicRNNLayer` . decode_sequence_length : tensor for decoder sequence length, see :class:`DynamicRNNLayer` . initial_state : None or forward RNN State If None, initial_state is of encoder zero_state. dropout : `tuple` of `float`: (input_keep_prob, output_keep_prob). The input and output keep probability. n_layer : a int, default is 1. The number of RNN layers. 
return_seq_2d : boolean - When return_last = False - If True, return 2D Tensor [n_example, n_hidden], for stacking DenseLayer or computing cost after it. - If False, return 3D Tensor [n_example/n_steps(max), n_steps(max), n_hidden], for stacking multiple RNN after it. name : a string or None An optional name to attach to this layer. Variables ------------ outputs : a tensor The output of RNN decoder. final_state : a tensor or StateTuple Final state of decoder, see :class:`DynamicRNNLayer` . Examples ---------- >>> from tensorlayer.layers import * >>> batch_size = 32 >>> encode_seqs = tf.placeholder(dtype=tf.int64, shape=[batch_size, None], name="encode_seqs") >>> decode_seqs = tf.placeholder(dtype=tf.int64, shape=[batch_size, None], name="decode_seqs") >>> target_seqs = tf.placeholder(dtype=tf.int64, shape=[batch_size, None], name="target_seqs") >>> target_mask = tf.placeholder(dtype=tf.int64, shape=[batch_size, None], name="target_mask") # tl.prepro.sequences_get_mask() >>> with tf.variable_scope("model"): ... # for chatbot, you can use the same embedding layer, ... # for translation, you may want to use 2 seperated embedding layers >>> with tf.variable_scope("embedding") as vs: >>> net_encode = EmbeddingInputlayer( ... inputs = encode_seqs, ... vocabulary_size = 10000, ... embedding_size = 200, ... name = 'seq_embedding') >>> vs.reuse_variables() >>> tl.layers.set_name_reuse(True) >>> net_decode = EmbeddingInputlayer( ... inputs = decode_seqs, ... vocabulary_size = 10000, ... embedding_size = 200, ... name = 'seq_embedding') >>> net = Seq2Seq(net_encode, net_decode, ... cell_fn = tf.contrib.rnn.BasicLSTMCell, ... n_hidden = 200, ... initializer = tf.random_uniform_initializer(-0.1, 0.1), ... encode_sequence_length = retrieve_seq_length_op2(encode_seqs), ... decode_sequence_length = retrieve_seq_length_op2(decode_seqs), ... initial_state = None, ... dropout = None, ... n_layer = 1, ... return_seq_2d = True, ... 
name = 'seq2seq') >>> net_out = DenseLayer(net, n_units=10000, act=tf.identity, name='output') >>> e_loss = tl.cost.cross_entropy_seq_with_mask(logits=net_out.outputs, target_seqs=target_seqs, input_mask=target_mask, return_details=False, name='cost') >>> y = tf.nn.softmax(net_out.outputs) >>> net_out.print_params(False) Notes -------- - How to feed data: `Sequence to Sequence Learning with Neural Networks <https://arxiv.org/pdf/1409.3215v3.pdf>`_ - input_seqs : ``['how', 'are', 'you', '<PAD_ID'>]`` - decode_seqs : ``['<START_ID>', 'I', 'am', 'fine', '<PAD_ID'>]`` - target_seqs : ``['I', 'am', 'fine', '<END_ID']`` - target_mask : ``[1, 1, 1, 1, 0]`` - related functions : tl.prepro <pad_sequences, precess_sequences, sequences_add_start_id, sequences_get_mask> """ def __init__( self, net_encode_in = None, net_decode_in = None, cell_fn = None,#tf.nn.rnn_cell.LSTMCell, cell_init_args = {'state_is_tuple':True}, n_hidden = 256, initializer = tf.random_uniform_initializer(-0.1, 0.1), encode_sequence_length = None, decode_sequence_length = None, initial_state = None, dropout = None, n_layer = 1, # return_last = False, return_seq_2d = False, name = 'seq2seq', ): Layer.__init__(self, name=name) if cell_fn is None: raise Exception("Please put in cell_fn") if 'GRU' in cell_fn.__name__: try: cell_init_args.pop('state_is_tuple') except: pass # self.inputs = layer.outputs print(" [**] Seq2Seq %s: n_hidden:%d cell_fn:%s dropout:%s n_layer:%d" % (self.name, n_hidden, cell_fn.__name__, dropout, n_layer)) with tf.variable_scope(name) as vs:#, reuse=reuse): # tl.layers.set_name_reuse(reuse) # network = InputLayer(self.inputs, name=name+'/input') network_encode = DynamicRNNLayer(net_encode_in, cell_fn = cell_fn, cell_init_args = cell_init_args, n_hidden = n_hidden, initial_state = initial_state, dropout = dropout, n_layer = n_layer, sequence_length = encode_sequence_length, return_last = False, return_seq_2d = True, name = name+'_encode') # vs.reuse_variables() # tl.layers.set_name_reuse(True) network_decode = DynamicRNNLayer(net_decode_in, cell_fn = cell_fn, cell_init_args = cell_init_args, n_hidden = n_hidden, initial_state = network_encode.final_state, dropout = dropout, n_layer = n_layer, sequence_length = decode_sequence_length, return_last = False, return_seq_2d = return_seq_2d, name = name+'_decode') self.outputs = network_decode.outputs rnn_variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name) # Final state self.final_state = network_decode.final_state # self.sequence_length = sequence_length self.all_layers = list(network_decode.all_layers) self.all_params = list(network_decode.all_params) self.all_drop = dict(network_decode.all_drop) self.all_layers.extend( [self.outputs] ) self.all_params.extend( rnn_variables ) self.all_layers = list_remove_repeat(self.all_layers) self.all_params = list_remove_repeat(self.all_params) class PeekySeq2Seq(Layer): """ Waiting for contribution. The :class:`PeekySeq2Seq` class, see `Model <https://camo.githubusercontent.com/7f690d451036938a51e62feb77149c8bb4be6675/687474703a2f2f6936342e74696e797069632e636f6d2f333032617168692e706e67>`_ and `Learning Phrase Representations using RNN Encoder-Decoder for Statistical Machine Translation <https://arxiv.org/abs/1406.1078>`_ . 
""" def __init__( self, net_encode_in = None, net_decode_in = None, cell_fn = None,#tf.nn.rnn_cell.LSTMCell, cell_init_args = {'state_is_tuple':True}, n_hidden = 256, initializer = tf.random_uniform_initializer(-0.1, 0.1), in_sequence_length = None, out_sequence_length = None, initial_state = None, dropout = None, n_layer = 1, # return_last = False, return_seq_2d = False, name = 'peeky_seq2seq', ): Layer.__init__(self, name=name) if cell_fn is None: raise Exception("Please put in cell_fn") # self.inputs = layer.outputs print(" [TL] PeekySeq2seq %s: n_hidden:%d cell_fn:%s dropout:%s n_layer:%d" % (self.name, n_hidden, cell_fn.__name__, dropout, n_layer)) class AttentionSeq2Seq(Layer): """ Waiting for contribution. The :class:`AttentionSeq2Seq` class, see `Model <https://camo.githubusercontent.com/0e2e4e5fb2dd47846c2fe027737a5df5e711df1b/687474703a2f2f6936342e74696e797069632e636f6d2f6132727733642e706e67>`_ and `Neural Machine Translation by Jointly Learning to Align and Translate <https://arxiv.org/pdf/1409.0473v6.pdf>`_ . """ def __init__( self, net_encode_in = None, net_decode_in = None, cell_fn = None,#tf.nn.rnn_cell.LSTMCell, cell_init_args = {'state_is_tuple':True}, n_hidden = 256, initializer = tf.random_uniform_initializer(-0.1, 0.1), in_sequence_length = None, out_sequence_length = None, initial_state = None, dropout = None, n_layer = 1, # return_last = False, return_seq_2d = False, name = 'attention_seq2seq', ): Layer.__init__(self, name=name) if cell_fn is None: raise Exception("Please put in cell_fn") # self.inputs = layer.outputs print(" [TL] PeekySeq2seq %s: n_hidden:%d cell_fn:%s dropout:%s n_layer:%d" % (self.name, n_hidden, cell_fn.__name__, dropout, n_layer)) ## Shape layer class FlattenLayer(Layer): """ The :class:`FlattenLayer` class is layer which reshape high-dimension input to a vector. Then we can apply DenseLayer, RNNLayer, ConcatLayer and etc on the top of it. [batch_size, mask_row, mask_col, n_mask] ---> [batch_size, mask_row * mask_col * n_mask] Parameters ---------- layer : a :class:`Layer` instance The `Layer` class feeding into this layer. name : a string or None An optional name to attach to this layer. Examples -------- >>> x = tf.placeholder(tf.float32, shape=[None, 28, 28, 1]) >>> network = tl.layers.InputLayer(x, name='input_layer') >>> network = tl.layers.Conv2dLayer(network, ... act = tf.nn.relu, ... shape = [5, 5, 32, 64], ... strides=[1, 1, 1, 1], ... padding='SAME', ... name ='cnn_layer') >>> network = tl.layers.Pool2dLayer(network, ... ksize=[1, 2, 2, 1], ... strides=[1, 2, 2, 1], ... padding='SAME', ... pool = tf.nn.max_pool, ... name ='pool_layer',) >>> network = tl.layers.FlattenLayer(network, name='flatten_layer') """ def __init__( self, layer = None, name ='flatten_layer', ): Layer.__init__(self, name=name) self.inputs = layer.outputs self.outputs = flatten_reshape(self.inputs, name=name) self.n_units = int(self.outputs.get_shape()[-1]) print(" [TL] FlattenLayer %s: %d" % (self.name, self.n_units)) self.all_layers = list(layer.all_layers) self.all_params = list(layer.all_params) self.all_drop = dict(layer.all_drop) self.all_layers.extend( [self.outputs] ) class ReshapeLayer(Layer): """ The :class:`ReshapeLayer` class is layer which reshape the tensor. Parameters ---------- layer : a :class:`Layer` instance The `Layer` class feeding into this layer. shape : a list The output shape. name : a string or None An optional name to attach to this layer. Examples -------- - The core of this layer is ``tf.reshape``. 
- Use TensorFlow only : >>> x = tf.placeholder(tf.float32, shape=[None, 3]) >>> y = tf.reshape(x, shape=[-1, 3, 3]) >>> sess = tf.InteractiveSession() >>> print(sess.run(y, feed_dict={x:[[1,1,1],[2,2,2],[3,3,3],[4,4,4],[5,5,5],[6,6,6]]})) ... [[[ 1. 1. 1.] ... [ 2. 2. 2.] ... [ 3. 3. 3.]] ... [[ 4. 4. 4.] ... [ 5. 5. 5.] ... [ 6. 6. 6.]]] """ def __init__( self, layer = None, shape = [], name ='reshape_layer', ): Layer.__init__(self, name=name) self.inputs = layer.outputs self.outputs = tf.reshape(self.inputs, shape=shape, name=name) print(" [TL] ReshapeLayer %s: %s" % (self.name, self.outputs.get_shape())) self.all_layers = list(layer.all_layers) self.all_params = list(layer.all_params) self.all_drop = dict(layer.all_drop) self.all_layers.extend( [self.outputs] ) class LambdaLayer(Layer): """ The :class:`LambdaLayer` class is a layer which is able to use the provided function. Parameters ---------- layer : a :class:`Layer` instance The `Layer` class feeding into this layer. fn : a function The function that applies to the outputs of previous layer. fn_args : a dictionary The arguments for the function (option). name : a string or None An optional name to attach to this layer. Examples --------- >>> x = tf.placeholder(tf.float32, shape=[None, 1], name='x') >>> network = tl.layers.InputLayer(x, name='input_layer') >>> network = LambdaLayer(network, lambda x: 2*x, name='lambda_layer') >>> y = network.outputs >>> sess = tf.InteractiveSession() >>> out = sess.run(y, feed_dict={x : [[1],[2]]}) ... [[2],[4]] """ def __init__( self, layer = None, fn = None, fn_args = {}, name = 'lambda_layer', ): Layer.__init__(self, name=name) assert layer is not None assert fn is not None self.inputs = layer.outputs print(" [TL] LambdaLayer %s" % self.name) with tf.variable_scope(name) as vs: self.outputs = fn(self.inputs, **fn_args) variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name) self.all_layers = list(layer.all_layers) self.all_params = list(layer.all_params) self.all_drop = dict(layer.all_drop) self.all_layers.extend( [self.outputs] ) self.all_params.extend( variables ) ## Merge layer class ConcatLayer(Layer): """ The :class:`ConcatLayer` class is layer which concat (merge) two or more :class:`DenseLayer` to a single class:`DenseLayer`. Parameters ---------- layer : a list of :class:`Layer` instances The `Layer` class feeding into this layer. concat_dim : int Dimension along which to concatenate. name : a string or None An optional name to attach to this layer. Examples -------- >>> sess = tf.InteractiveSession() >>> x = tf.placeholder(tf.float32, shape=[None, 784]) >>> inputs = tl.layers.InputLayer(x, name='input_layer') >>> net1 = tl.layers.DenseLayer(inputs, n_units=800, act = tf.nn.relu, name='relu1_1') >>> net2 = tl.layers.DenseLayer(inputs, n_units=300, act = tf.nn.relu, name='relu2_1') >>> network = tl.layers.ConcatLayer(layer = [net1, net2], name ='concat_layer') ... [TL] InputLayer input_layer (?, 784) ... [TL] DenseLayer relu1_1: 800, <function relu at 0x1108e41e0> ... [TL] DenseLayer relu2_1: 300, <function relu at 0x1108e41e0> ... [TL] ConcatLayer concat_layer, 1100 ... >>> tl.layers.initialize_global_variables(sess) >>> network.print_params() ... param 0: (784, 800) (mean: 0.000021, median: -0.000020 std: 0.035525) ... param 1: (800,) (mean: 0.000000, median: 0.000000 std: 0.000000) ... param 2: (784, 300) (mean: 0.000000, median: -0.000048 std: 0.042947) ... param 3: (300,) (mean: 0.000000, median: 0.000000 std: 0.000000) ... 
num of params: 863500 >>> network.print_layers() ... layer 0: Tensor("Relu:0", shape=(?, 800), dtype=float32) ... layer 1: Tensor("Relu_1:0", shape=(?, 300), dtype=float32) ... """ def __init__( self, layer = [], concat_dim = 1, name ='concat_layer', ): Layer.__init__(self, name=name) self.inputs = [] for l in layer: self.inputs.append(l.outputs) try: # TF1.0 self.outputs = tf.concat(self.inputs, concat_dim, name=name) except: # TF0.12 self.outputs = tf.concat(concat_dim, self.inputs, name=name) self.n_units = int(self.outputs.get_shape()[-1]) print(" [TL] ConcatLayer %s: %d" % (self.name, self.n_units)) self.all_layers = list(layer[0].all_layers) self.all_params = list(layer[0].all_params) self.all_drop = dict(layer[0].all_drop) for i in range(1, len(layer)): self.all_layers.extend(list(layer[i].all_layers)) self.all_params.extend(list(layer[i].all_params)) self.all_drop.update(dict(layer[i].all_drop)) self.all_layers = list_remove_repeat(self.all_layers) self.all_params = list_remove_repeat(self.all_params) #self.all_drop = list_remove_repeat(self.all_drop) # it is a dict class ElementwiseLayer(Layer): """ The :class:`ElementwiseLayer` class combines multiple :class:`Layer` which have the same output shapes by a given elemwise-wise operation. Parameters ---------- layer : a list of :class:`Layer` instances The `Layer` class feeding into this layer. combine_fn : a TensorFlow elemwise-merge function e.g. AND is ``tf.minimum`` ; OR is ``tf.maximum`` ; ADD is ``tf.add`` ; MUL is ``tf.multiply`` and so on. See `TensorFlow Math API <https://www.tensorflow.org/versions/master/api_docs/python/math_ops.html#math>`_ . name : a string or None An optional name to attach to this layer. Examples -------- - AND Logic >>> net_0 = tl.layers.DenseLayer(net_0, n_units=500, ... act = tf.nn.relu, name='net_0') >>> net_1 = tl.layers.DenseLayer(net_1, n_units=500, ... act = tf.nn.relu, name='net_1') >>> net_com = tl.layers.ElementwiseLayer(layer = [net_0, net_1], ... combine_fn = tf.minimum, ... name = 'combine_layer') """ def __init__( self, layer = [], combine_fn = tf.minimum, name ='elementwise_layer', ): Layer.__init__(self, name=name) print(" [TL] ElementwiseLayer %s: size:%s fn:%s" % (self.name, layer[0].outputs.get_shape(), combine_fn.__name__)) self.outputs = layer[0].outputs # print(self.outputs._shape, type(self.outputs._shape)) for l in layer[1:]: assert str(self.outputs.get_shape()) == str(l.outputs.get_shape()), "Hint: the input shapes should be the same. %s != %s" % (self.outputs.get_shape() , str(l.outputs.get_shape())) self.outputs = combine_fn(self.outputs, l.outputs, name=name) self.all_layers = list(layer[0].all_layers) self.all_params = list(layer[0].all_params) self.all_drop = dict(layer[0].all_drop) for i in range(1, len(layer)): self.all_layers.extend(list(layer[i].all_layers)) self.all_params.extend(list(layer[i].all_params)) self.all_drop.update(dict(layer[i].all_drop)) self.all_layers = list_remove_repeat(self.all_layers) self.all_params = list_remove_repeat(self.all_params) # self.all_drop = list_remove_repeat(self.all_drop) # Extend class ExpandDimsLayer(Layer): """ The :class:`ExpandDimsLayer` class inserts a dimension of 1 into a tensor's shape, see `tf.expand_dims() <https://www.tensorflow.org/api_docs/python/array_ops/shapes_and_shaping#expand_dims>`_ . Parameters ---------- layer : a :class:`Layer` instance The `Layer` class feeding into this layer. axis : int, 0-D (scalar). Specifies the dimension index at which to expand the shape of input. 
name : a string or None An optional name to attach to this layer. """ def __init__( self, layer = None, axis = None, name = 'expand_dims', ): Layer.__init__(self, name=name) self.inputs = layer.outputs print(" [TL] ExpandDimsLayer %s: axis:%d" % (self.name, axis)) with tf.variable_scope(name) as vs: try: # TF12 TF1.0 self.outputs = tf.expand_dims(self.inputs, axis=axis) except: # TF11 self.outputs = tf.expand_dims(self.inputs, dim=axis) self.all_layers = list(layer.all_layers) self.all_params = list(layer.all_params) self.all_drop = dict(layer.all_drop) self.all_layers.extend( [self.outputs] ) # self.all_params.extend( variables ) class TileLayer(Layer): """ The :class:`TileLayer` class constructs a tensor by tiling a given tensor, see `tf.tile() <https://www.tensorflow.org/api_docs/python/array_ops/slicing_and_joining#tile>`_ . Parameters ---------- layer : a :class:`Layer` instance The `Layer` class feeding into this layer. multiples: a list of int Must be one of the following types: int32, int64. 1-D. Length must be the same as the number of dimensions in input name : a string or None An optional name to attach to this layer. """ def __init__( self, layer = None, multiples = None, name = 'tile', ): Layer.__init__(self, name=name) self.inputs = layer.outputs print(" [TL] TileLayer %s: multiples:%s" % (self.name, multiples)) with tf.variable_scope(name) as vs: self.outputs = tf.tile(self.inputs, multiples=multiples) self.all_layers = list(layer.all_layers) self.all_params = list(layer.all_params) self.all_drop = dict(layer.all_drop) self.all_layers.extend( [self.outputs] ) # self.all_params.extend( variables ) ## TF-Slim layer class SlimNetsLayer(Layer): """ The :class:`SlimNetsLayer` class can be used to merge all TF-Slim nets into TensorLayer. Model can be found in `slim-model <https://github.com/tensorflow/models/tree/master/slim#pre-trained-models>`_ , more about slim see `slim-git <https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/slim>`_ . Parameters ---------- layer : a :class:`Layer` instance The `Layer` class feeding into this layer. slim_layer : a slim network function The network you want to stack onto, end with ``return net, end_points``. slim_args : dictionary The arguments for the slim model. name : a string or None An optional name to attach to this layer. Examples -------- - see Inception V3 example on `Github <https://github.com/zsdonghao/tensorlayer>`_ Notes ----- The due to TF-Slim stores the layers as dictionary, the ``all_layers`` in this network is not in order ! Fortunately, the ``all_params`` are in order. 
""" def __init__( self, layer = None, slim_layer = None, slim_args = {}, name ='tfslim_layer', ): Layer.__init__(self, name=name) assert slim_layer is not None assert slim_args is not None self.inputs = layer.outputs print(" [TL] SlimNetsLayer %s: %s" % (self.name, slim_layer.__name__)) # with tf.variable_scope(name) as vs: # net, end_points = slim_layer(self.inputs, **slim_args) # slim_variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name) net, end_points = slim_layer(self.inputs, **slim_args) slim_variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=name) if slim_variables == []: print("No variables found under %s : the name of SlimNetsLayer should be matched with the begining of the ckpt file, see tutorial_inceptionV3_tfslim.py for more details" % name) self.outputs = net slim_layers = [] for v in end_points.values(): # tf.contrib.layers.summaries.summarize_activation(v) slim_layers.append(v) self.all_layers = list(layer.all_layers) self.all_params = list(layer.all_params) self.all_drop = dict(layer.all_drop) self.all_layers.extend( slim_layers ) self.all_params.extend( slim_variables ) ## Keras layer class KerasLayer(Layer): """ The :class:`KerasLayer` class can be used to merge all Keras layers into TensorLayer. Example can be found here `tutorial_keras.py <https://github.com/zsdonghao/tensorlayer/blob/master/example/tutorial_keras.py>`_ Parameters ---------- layer : a :class:`Layer` instance The `Layer` class feeding into this layer. keras_layer : a keras network function keras_args : dictionary The arguments for the keras model. name : a string or None An optional name to attach to this layer. """ def __init__( self, layer = None, keras_layer = None, keras_args = {}, name ='keras_layer', ): Layer.__init__(self, name=name) assert layer is not None assert keras_layer is not None self.inputs = layer.outputs print(" [TL] KerasLayer %s: %s" % (self.name, keras_layer)) print(" This API will be removed, please use LambdaLayer instead.") with tf.variable_scope(name) as vs: self.outputs = keras_layer(self.inputs, **keras_args) variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name) self.all_layers = list(layer.all_layers) self.all_params = list(layer.all_params) self.all_drop = dict(layer.all_drop) self.all_layers.extend( [self.outputs] ) self.all_params.extend( variables ) ## Estimator layer class EstimatorLayer(Layer): """ The :class:`EstimatorLayer` class accepts ``model_fn`` that described the model. It is similar with :class:`KerasLayer`, see `tutorial_keras.py <https://github.com/zsdonghao/tensorlayer/blob/master/example/tutorial_keras.py>`_ Parameters ---------- layer : a :class:`Layer` instance The `Layer` class feeding into this layer. model_fn : a function that described the model. args : dictionary The arguments for the model_fn. name : a string or None An optional name to attach to this layer. 
""" def __init__( self, layer = None, model_fn = None, args = {}, name ='estimator_layer', ): Layer.__init__(self, name=name) assert layer is not None assert model_fn is not None self.inputs = layer.outputs print(" [TL] EstimatorLayer %s: %s" % (self.name, model_fn)) print(" This API will be removed, please use LambdaLayer instead.") with tf.variable_scope(name) as vs: self.outputs = model_fn(self.inputs, **args) variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name) self.all_layers = list(layer.all_layers) self.all_params = list(layer.all_params) self.all_drop = dict(layer.all_drop) self.all_layers.extend( [self.outputs] ) self.all_params.extend( variables ) ## Special activation class PReluLayer(Layer): """ The :class:`PReluLayer` class is Parametric Rectified Linear layer. Parameters ---------- x : A `Tensor` with type `float`, `double`, `int32`, `int64`, `uint8`, `int16`, or `int8`. channel_shared : `bool`. Single weight is shared by all channels a_init : alpha initializer, default zero constant. The initializer for initializing the alphas. a_init_args : dictionary The arguments for the weights initializer. name : A name for this activation op (optional). References ----------- - `Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification <http://arxiv.org/pdf/1502.01852v1.pdf>`_ """ def __init__( self, layer = None, channel_shared = False, a_init = tf.constant_initializer(value=0.0), a_init_args = {}, # restore = True, name="prelu_layer" ): Layer.__init__(self, name=name) self.inputs = layer.outputs print(" [TL] PReluLayer %s: channel_shared:%s" % (self.name, channel_shared)) if channel_shared: w_shape = (1,) else: w_shape = int(self.inputs.get_shape()[-1]) # with tf.name_scope(name) as scope: with tf.variable_scope(name) as vs: alphas = tf.get_variable(name='alphas', shape=w_shape, initializer=a_init, **a_init_args ) try: ## TF 1.0 self.outputs = tf.nn.relu(self.inputs) + tf.multiply(alphas, (self.inputs - tf.abs(self.inputs))) * 0.5 except: ## TF 0.12 self.outputs = tf.nn.relu(self.inputs) + tf.mul(alphas, (self.inputs - tf.abs(self.inputs))) * 0.5 self.all_layers = list(layer.all_layers) self.all_params = list(layer.all_params) self.all_drop = dict(layer.all_drop) self.all_layers.extend( [self.outputs] ) self.all_params.extend( [alphas] ) ## Flow control layer class MultiplexerLayer(Layer): """ The :class:`MultiplexerLayer` selects one of several input and forwards the selected input into the output, see `tutorial_mnist_multiplexer.py`. Parameters ---------- layer : a list of :class:`Layer` instances The `Layer` class feeding into this layer. name : a string or None An optional name to attach to this layer. Variables ----------------------- sel : a placeholder Input an int [0, inf], which input is the output Examples -------- >>> x = tf.placeholder(tf.float32, shape=[None, 784], name='x') >>> y_ = tf.placeholder(tf.int64, shape=[None, ], name='y_') >>> # define the network >>> net_in = tl.layers.InputLayer(x, name='input_layer') >>> net_in = tl.layers.DropoutLayer(net_in, keep=0.8, name='drop1') >>> # net 0 >>> net_0 = tl.layers.DenseLayer(net_in, n_units=800, ... act = tf.nn.relu, name='net0/relu1') >>> net_0 = tl.layers.DropoutLayer(net_0, keep=0.5, name='net0/drop2') >>> net_0 = tl.layers.DenseLayer(net_0, n_units=800, ... act = tf.nn.relu, name='net0/relu2') >>> # net 1 >>> net_1 = tl.layers.DenseLayer(net_in, n_units=800, ... 
act = tf.nn.relu, name='net1/relu1') >>> net_1 = tl.layers.DropoutLayer(net_1, keep=0.8, name='net1/drop2') >>> net_1 = tl.layers.DenseLayer(net_1, n_units=800, ... act = tf.nn.relu, name='net1/relu2') >>> net_1 = tl.layers.DropoutLayer(net_1, keep=0.8, name='net1/drop3') >>> net_1 = tl.layers.DenseLayer(net_1, n_units=800, ... act = tf.nn.relu, name='net1/relu3') >>> # multiplexer >>> net_mux = tl.layers.MultiplexerLayer(layer = [net_0, net_1], name='mux_layer') >>> network = tl.layers.ReshapeLayer(net_mux, shape=[-1, 800], name='reshape_layer') # >>> network = tl.layers.DropoutLayer(network, keep=0.5, name='drop3') >>> # output layer >>> network = tl.layers.DenseLayer(network, n_units=10, ... act = tf.identity, name='output_layer') References ------------ - See ``tf.pack() for TF0.12 or tf.stack() for TF1.0`` and ``tf.gather()`` at `TensorFlow - Slicing and Joining <https://www.tensorflow.org/versions/master/api_docs/python/array_ops.html#slicing-and-joining>`_ """ def __init__(self, layer = [], name='mux_layer'): Layer.__init__(self, name=name) self.n_inputs = len(layer) self.inputs = [] for l in layer: self.inputs.append(l.outputs) try: ## TF1.0 all_inputs = tf.stack(self.inputs, name=name) # pack means concat a list of tensor in a new dim # 1.2 except: all_inputs = tf.pack(self.inputs, name=name) # pack means concat a list of tensor in a new dim # 1.2 print(" [TL] MultiplexerLayer %s: n_inputs:%d" % (self.name, self.n_inputs)) self.sel = tf.placeholder(tf.int32) self.outputs = tf.gather(all_inputs, self.sel, name=name) # [sel, :, : ...] # 1.2 # print(self.outputs, vars(self.outputs)) # # tf.reshape(self.outputs, shape=) # exit() # the same with ConcatLayer self.all_layers = list(layer[0].all_layers) self.all_params = list(layer[0].all_params) self.all_drop = dict(layer[0].all_drop) for i in range(1, len(layer)): self.all_layers.extend(list(layer[i].all_layers)) self.all_params.extend(list(layer[i].all_params)) self.all_drop.update(dict(layer[i].all_drop)) self.all_layers = list_remove_repeat(self.all_layers) self.all_params = list_remove_repeat(self.all_params) # self.all_drop = list_remove_repeat(self.all_drop) ## We can Duplicate the network instead of DemultiplexerLayer # class DemultiplexerLayer(Layer): # """ # The :class:`DemultiplexerLayer` takes a single input and select one of many output lines, which is connected to the input. # # Parameters # ---------- # layer : a list of :class:`Layer` instances # The `Layer` class feeding into this layer. # n_outputs : a int # The number of output # name : a string or None # An optional name to attach to this layer. # # Field (Class Variables) # ----------------------- # sel : a placeholder # Input int [0, inf], the # outputs : a list of Tensor # A list of outputs # # Examples # -------- # >>> # """ # def __init__(self, # layer = None, # name='demux_layer'): # Layer.__init__(self, name=name) # self.outputs = [] ## Wrapper class EmbeddingAttentionSeq2seqWrapper(Layer): """Sequence-to-sequence model with attention and for multiple buckets (Deprecated after TF0.12). This example implements a multi-layer recurrent neural network as encoder, and an attention-based decoder. This is the same as the model described in this paper: - `Grammar as a Foreign Language <http://arxiv.org/abs/1412.7449>`_ please look there for details, or into the seq2seq library for complete model implementation. This example also allows to use GRU cells in addition to LSTM cells, and sampled softmax to handle large output vocabulary size. 
A single-layer version of this model, but with bi-directional encoder, was presented in - `Neural Machine Translation by Jointly Learning to Align and Translate <http://arxiv.org/abs/1409.0473>`_ The sampled softmax is described in Section 3 of the following paper. - `On Using Very Large Target Vocabulary for Neural Machine Translation <http://arxiv.org/abs/1412.2007>`_ Parameters ---------- source_vocab_size : size of the source vocabulary. target_vocab_size : size of the target vocabulary. buckets : a list of pairs (I, O), where I specifies maximum input length that will be processed in that bucket, and O specifies maximum output length. Training instances that have inputs longer than I or outputs longer than O will be pushed to the next bucket and padded accordingly. We assume that the list is sorted, e.g., [(2, 4), (8, 16)]. size : number of units in each layer of the model. num_layers : number of layers in the model. max_gradient_norm : gradients will be clipped to maximally this norm. batch_size : the size of the batches used during training; the model construction is independent of batch_size, so it can be changed after initialization if this is convenient, e.g., for decoding. learning_rate : learning rate to start with. learning_rate_decay_factor : decay learning rate by this much when needed. use_lstm : if true, we use LSTM cells instead of GRU cells. num_samples : number of samples for sampled softmax. forward_only : if set, we do not construct the backward pass in the model. name : a string or None An optional name to attach to this layer. """ def __init__(self, source_vocab_size, target_vocab_size, buckets, size, num_layers, max_gradient_norm, batch_size, learning_rate, learning_rate_decay_factor, use_lstm=False, num_samples=512, forward_only=False, name='wrapper'): Layer.__init__(self)#, name=name) self.source_vocab_size = source_vocab_size self.target_vocab_size = target_vocab_size self.buckets = buckets self.batch_size = batch_size self.learning_rate = tf.Variable(float(learning_rate), trainable=False, name='learning_rate') self.learning_rate_decay_op = self.learning_rate.assign( self.learning_rate * learning_rate_decay_factor) self.global_step = tf.Variable(0, trainable=False, name='global_step') if tf.__version__ >= "0.12": raise Exception("Deprecated after TF0.12 : use other seq2seq layers instead.") # =========== Fake output Layer for compute cost ====== # If we use sampled softmax, we need an output projection. with tf.variable_scope(name) as vs: output_projection = None softmax_loss_function = None # Sampled softmax only makes sense if we sample less than vocabulary size. if num_samples > 0 and num_samples < self.target_vocab_size: w = tf.get_variable("proj_w", [size, self.target_vocab_size]) w_t = tf.transpose(w) b = tf.get_variable("proj_b", [self.target_vocab_size]) output_projection = (w, b) def sampled_loss(inputs, labels): labels = tf.reshape(labels, [-1, 1]) return tf.nn.sampled_softmax_loss(w_t, b, inputs, labels, num_samples, self.target_vocab_size) softmax_loss_function = sampled_loss # ============ Seq Encode Layer ============= # Create the internal multi-layer cell for our RNN. 
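# The block below picks the RNN cell constructor: GRUCell by default, or
# BasicLSTMCell when use_lstm is set, preferring tf.contrib.rnn and falling
# back to tf.nn.rnn_cell on pre-1.0 TensorFlow; when num_layers > 1 the cells
# are stacked with MultiRNNCell, and the resulting cell is used by
# seq2seq_f() further down together with the sampled-softmax output
# projection defined above.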
try: # TF1.0 cell_creator = lambda: tf.contrib.rnn.GRUCell(size) except: cell_creator = lambda: tf.nn.rnn_cell.GRUCell(size) if use_lstm: try: # TF1.0 cell_creator = lambda: tf.contrib.rnn.BasicLSTMCell(size) except: cell_creator = lambda: tf.nn.rnn_cell.BasicLSTMCell(size) cell = cell_creator() if num_layers > 1: try: # TF1.0 cell = tf.contrib.rnn.MultiRNNCell([single_cell] * num_layers) except: cell = tf.nn.rnn_cell.MultiRNNCell([single_cell] * num_layers) # ============== Seq Decode Layer ============ # The seq2seq function: we use embedding for the input and attention. def seq2seq_f(encoder_inputs, decoder_inputs, do_decode): return tf.nn.seq2seq.embedding_attention_seq2seq( encoder_inputs, decoder_inputs, cell, num_encoder_symbols=source_vocab_size, num_decoder_symbols=target_vocab_size, embedding_size=size, output_projection=output_projection, feed_previous=do_decode) #============================================================= # Feeds for inputs. self.encoder_inputs = [] self.decoder_inputs = [] self.target_weights = [] for i in xrange(buckets[-1][0]): # Last bucket is the biggest one. self.encoder_inputs.append(tf.placeholder(tf.int32, shape=[None], name="encoder{0}".format(i))) for i in xrange(buckets[-1][1] + 1): self.decoder_inputs.append(tf.placeholder(tf.int32, shape=[None], name="decoder{0}".format(i))) self.target_weights.append(tf.placeholder(tf.float32, shape=[None], name="weight{0}".format(i))) # Our targets are decoder inputs shifted by one. targets = [self.decoder_inputs[i + 1] for i in xrange(len(self.decoder_inputs) - 1)] self.targets = targets # DH add for debug # Training outputs and losses. if forward_only: self.outputs, self.losses = tf.nn.seq2seq.model_with_buckets( self.encoder_inputs, self.decoder_inputs, targets, self.target_weights, buckets, lambda x, y: seq2seq_f(x, y, True), softmax_loss_function=softmax_loss_function) # If we use output projection, we need to project outputs for decoding. if output_projection is not None: for b in xrange(len(buckets)): self.outputs[b] = [ tf.matmul(output, output_projection[0]) + output_projection[1] for output in self.outputs[b] ] else: self.outputs, self.losses = tf.nn.seq2seq.model_with_buckets( self.encoder_inputs, self.decoder_inputs, targets, self.target_weights, buckets, lambda x, y: seq2seq_f(x, y, False), softmax_loss_function=softmax_loss_function) # Gradients and SGD update operation for training the model. params = tf.trainable_variables() if not forward_only: self.gradient_norms = [] self.updates = [] opt = tf.train.GradientDescentOptimizer(self.learning_rate) for b in xrange(len(buckets)): gradients = tf.gradients(self.losses[b], params) clipped_gradients, norm = tf.clip_by_global_norm(gradients, max_gradient_norm) self.gradient_norms.append(norm) self.updates.append(opt.apply_gradients( zip(clipped_gradients, params), global_step=self.global_step)) # if save into npz self.all_params = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name) # if save into ckpt self.saver = tf.train.Saver(tf.all_variables()) def step(self, session, encoder_inputs, decoder_inputs, target_weights, bucket_id, forward_only): """Run a step of the model feeding the given inputs. Parameters ---------- session : tensorflow session to use. encoder_inputs : list of numpy int vectors to feed as encoder inputs. decoder_inputs : list of numpy int vectors to feed as decoder inputs. target_weights : list of numpy float vectors to feed as target weights. bucket_id : which bucket of the model to use. 
forward_only : whether to do the backward step or only forward. Returns -------- A triple consisting of gradient norm (or None if we did not do backward), average perplexity, and the outputs. Raises -------- ValueError : if length of encoder_inputs, decoder_inputs, or target_weights disagrees with bucket size for the specified bucket_id. """ # Check if the sizes match. encoder_size, decoder_size = self.buckets[bucket_id] if len(encoder_inputs) != encoder_size: raise ValueError("Encoder length must be equal to the one in bucket," " %d != %d." % (len(encoder_inputs), encoder_size)) if len(decoder_inputs) != decoder_size: raise ValueError("Decoder length must be equal to the one in bucket," " %d != %d." % (len(decoder_inputs), decoder_size)) if len(target_weights) != decoder_size: raise ValueError("Weights length must be equal to the one in bucket," " %d != %d." % (len(target_weights), decoder_size)) # print('in model.step()') # print('a',bucket_id, encoder_size, decoder_size) # Input feed: encoder inputs, decoder inputs, target_weights, as provided. input_feed = {} for l in xrange(encoder_size): input_feed[self.encoder_inputs[l].name] = encoder_inputs[l] for l in xrange(decoder_size): input_feed[self.decoder_inputs[l].name] = decoder_inputs[l] input_feed[self.target_weights[l].name] = target_weights[l] # print(self.encoder_inputs[l].name) # print(self.decoder_inputs[l].name) # print(self.target_weights[l].name) # Since our targets are decoder inputs shifted by one, we need one more. last_target = self.decoder_inputs[decoder_size].name input_feed[last_target] = np.zeros([self.batch_size], dtype=np.int32) # print('last_target', last_target) # Output feed: depends on whether we do a backward step or not. if not forward_only: output_feed = [self.updates[bucket_id], # Update Op that does SGD. self.gradient_norms[bucket_id], # Gradient norm. self.losses[bucket_id]] # Loss for this batch. else: output_feed = [self.losses[bucket_id]] # Loss for this batch. for l in xrange(decoder_size): # Output logits. output_feed.append(self.outputs[bucket_id][l]) outputs = session.run(output_feed, input_feed) if not forward_only: return outputs[1], outputs[2], None # Gradient norm, loss, no outputs. else: return None, outputs[0], outputs[1:] # No gradient norm, loss, outputs. def get_batch(self, data, bucket_id, PAD_ID=0, GO_ID=1, EOS_ID=2, UNK_ID=3): """ Get a random batch of data from the specified bucket, prepare for step. To feed data in step(..) it must be a list of batch-major vectors, while data here contains single length-major cases. So the main logic of this function is to re-index data cases to be in the proper format for feeding. Parameters ---------- data : a tuple of size len(self.buckets) in which each element contains lists of pairs of input and output data that we use to create a batch. bucket_id : integer, which bucket to get the batch for. PAD_ID : int Index of Padding in vocabulary GO_ID : int Index of GO in vocabulary EOS_ID : int Index of End of sentence in vocabulary UNK_ID : int Index of Unknown word in vocabulary Returns ------- The triple (encoder_inputs, decoder_inputs, target_weights) for the constructed batch that has the proper format to call step(...) later. """ encoder_size, decoder_size = self.buckets[bucket_id] encoder_inputs, decoder_inputs = [], [] # Get a random batch of encoder and decoder inputs from data, # pad them if needed, reverse encoder inputs and add GO to decoder. 
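# The per-example, length-major lists built below are re-indexed into
# batch-major numpy arrays (one array per time step), and target_weights is
# zeroed wherever the target (the decoder input shifted one step ahead) is
# PAD_ID, as well as at the final decoder step, so that padded positions do
# not contribute to the loss.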
for _ in xrange(self.batch_size): encoder_input, decoder_input = random.choice(data[bucket_id]) # Encoder inputs are padded and then reversed. encoder_pad = [PAD_ID] * (encoder_size - len(encoder_input)) encoder_inputs.append(list(reversed(encoder_input + encoder_pad))) # Decoder inputs get an extra "GO" symbol, and are padded then. decoder_pad_size = decoder_size - len(decoder_input) - 1 decoder_inputs.append([GO_ID] + decoder_input + [PAD_ID] * decoder_pad_size) # Now we create batch-major vectors from the data selected above. batch_encoder_inputs, batch_decoder_inputs, batch_weights = [], [], [] # Batch encoder inputs are just re-indexed encoder_inputs. for length_idx in xrange(encoder_size): batch_encoder_inputs.append( np.array([encoder_inputs[batch_idx][length_idx] for batch_idx in xrange(self.batch_size)], dtype=np.int32)) # Batch decoder inputs are re-indexed decoder_inputs, we create weights. for length_idx in xrange(decoder_size): batch_decoder_inputs.append( np.array([decoder_inputs[batch_idx][length_idx] for batch_idx in xrange(self.batch_size)], dtype=np.int32)) # Create target_weights to be 0 for targets that are padding. batch_weight = np.ones(self.batch_size, dtype=np.float32) for batch_idx in xrange(self.batch_size): # We set weight to 0 if the corresponding target is a PAD symbol. # The corresponding target is decoder_input shifted by 1 forward. if length_idx < decoder_size - 1: target = decoder_inputs[batch_idx][length_idx + 1] if length_idx == decoder_size - 1 or target == PAD_ID: batch_weight[batch_idx] = 0.0 batch_weights.append(batch_weight) return batch_encoder_inputs, batch_decoder_inputs, batch_weights ## Developing or Untested class MaxoutLayer(Layer): """ Waiting for contribution Single DenseLayer with Max-out behaviour, work well with Dropout. References ----------- `Goodfellow (2013) Maxout Networks <http://arxiv.org/abs/1302.4389>`_ """ def __init__( self, layer = None, n_units = 100, name ='maxout_layer', ): Layer.__init__(self, name=name) self.inputs = layer.outputs print(" [TL] MaxoutLayer %s: %d" % (self.name, self.n_units)) print(" Waiting for contribution") with tf.variable_scope(name) as vs: pass # W = tf.Variable(init.xavier_init(n_inputs=n_in, n_outputs=n_units, uniform=True), name='W') # b = tf.Variable(tf.zeros([n_units]), name='b') # self.outputs = act(tf.matmul(self.inputs, W) + b) # https://www.tensorflow.org/versions/r0.9/api_docs/python/array_ops.html#pack # http://stackoverflow.com/questions/34362193/how-to-explicitly-broadcast-a-tensor-to-match-anothers-shape-in-tensorflow # tf.concat tf.pack tf.tile self.all_layers = list(layer.all_layers) self.all_params = list(layer.all_params) self.all_drop = dict(layer.all_drop) self.all_layers.extend( [self.outputs] ) self.all_params.extend( [W, b] ) #
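# ---------------------------------------------------------------------------
# Illustrative usage sketch, not part of the original module. It assumes
# `wrapper` is an instance of the bucketed seq2seq model defined above,
# `sess` is an active tf.Session with variables already initialized, and
# `train_set` is a list with one entry per bucket, each entry holding
# (source_ids, target_ids) pairs; all three names are hypothetical and only
# show how get_batch() and step() fit together for one training update.
def _example_training_step(wrapper, sess, train_set, bucket_id=0):
    """Run one SGD update on a random batch from `bucket_id` and return its perplexity."""
    import math
    # get_batch() re-indexes length-major pairs into the batch-major vectors step() expects.
    encoder_inputs, decoder_inputs, target_weights = wrapper.get_batch(train_set, bucket_id)
    # With forward_only=False, step() returns (gradient norm, average loss, None).
    _, loss, _ = wrapper.step(sess, encoder_inputs, decoder_inputs,
                              target_weights, bucket_id, False)
    return math.exp(loss) if loss < 300 else float('inf')
# ---------------------------------------------------------------------------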
zjuela/LapSRN-tensorflow
tensorlayer/layers.py
Python
apache-2.0
264,811
[ "NEURON" ]
83a7e1b1774b82b7331ba1ab87cd032185d1d4d883af95f7b819f9f32d52e945
# -*- coding: utf-8 -*- """ Created on Mon Jun 23 12:11:37 2014 @author: andylane This script is designed to loop through generated BLAST files and given a particular off-target-match cutoff, plot out the specificity profiles by scaffold """ """ First, load in the BLAST files one by one and make a table that summarizes hits: FOR EACH BLAST FILE FOR EACH SEQUENCE(TARGET) IN EACH BLAST FILE GET THE TOTAL NUMBER OF HITS GET THE HIT LOCATIONS (TUPLE) ADD THIS INFO TO ONE_BIG_FILE AS FOLLOWS (BLAST FILE, SEQUENCE, [(HIT SCAFFOLD, HIT START, HIT END), (HIT SCAFFOLD, HIT START, HIT END), (HIT SCAFFOLD, HIT START, HIT END)]) SEPARATELY: IF TOTAL LEN(TUPLE(3)) IS GREATER THAN SOME THRESHOLD: TRIM THE LIST INTO FILTERED_LIST FROM FILTERED_LIST: FOR EACH SEQUENCE HASH (HIT SCAFFOLD, HIT START, HIT END) AND REMOVE DUPES MAKE A FLAT FILE WITH ALL THE (HIT SCAFFOLD, HIT START, HIT END) TUPLES COUNT THE SCAFFOLD OCCURRENCES OF EACH TARGET(DICTIONARY) PLOT OCCURRENCES PER SCAFFOLD """ from Bio.Blast.Applications import NcbiblastnCommandline import Bio from Bio import SeqIO from Bio.Blast import NCBIXML from Bio import Restriction from Bio.Restriction import * from Bio.Alphabet.IUPAC import IUPACAmbiguousDNA from Bio.Seq import Seq from Bio.SeqRecord import SeqRecord import seprepseq fasta_file = "../../Genomic Data/LAEVIS_7.1.repeatMasked.fa" # Input fasta file longestseqo = extractlongest.extractlongest(fasta_file) # Write out the longest sequence output_handle = open("longestseq.fasta", "w") Bio.SeqIO.write(longestseqo, output_handle, "fasta") output_handle.close() """ This is what I ran 6/21 on server """ # Read it in again longestseq = [] for record in Bio.SeqIO.parse(open("longestseq.fasta", "rU"), "fasta"): longestseq.append(record) longestseq = longestseq[0] nonrepseqs = seprepseq.seprepseq(longestseq) #Define a list of sequences to exclude from the top144 (i.e ones you already have oligos for) #excludelist = ["12709050", "8261124", "5570943", "14351343", "8489316", "9593441", "19747367", "14421422"] # Sort nonrepseqs by length, take longest 144 sortednonrepseqs = [] for record in nonrepseqs: #if record.id not in excludelist: sortednonrepseqs.append(record) #sortednonrepseqs = (record for record in longestseq if record.id in excludelist) sortednonrepseqs.sort(cmp=lambda x,y: cmp(len(y),len(x))) amps144 = sortednonrepseqs[0:300] # now 300 instead of 144... # Write out a FASTA file of amps144 records = amps144 SeqIO.write(records, "amps144.fasta", "fasta") """ Next part: cut up 300 amps """ amps_300 = [] master_featlist = [] for record in Bio.SeqIO.parse("amps144.fasta", "fasta"): record.seq.alphabet = IUPACAmbiguousDNA() amps144_300.append(record) for individual_sequence in amps_300: # Use the 300 amps that has ids matching the BLAST file names. matchlist= [] offtgt = [] title = "" blast_records = [] result_handle = open("blastresults_"+ str(individual_sequence.id) + ".blast") blast_records = NCBIXML.parse(result_handle) # use NCBIXML.parse(result_handle) for multiple queries here twentymers = seprepseq.maketwentymers(individual_sequence) # Re-generating cut 20mers only because this is the only source of info for locations of blast query results twentymers_meta_tuples = [(item.id+" "+item.description, int(item.name)+int(item.dbxrefs[0]), item.seq) for item in twentymers] blast_records_list = [] # Harness the generator to make a list... 
for blast_record in blast_records: blast_records_list.append(blast_record) result_handle.close() annotated_blast_records = [] for blast_record in blast_records_list: for y in twentymers_meta_tuples: if blast_record.query in y[0]: annotated_blast_records.append([blast_record, y]) # Next: get necessary info from blast_records_list. (total hits per seq, hit locations) for blast_record in annotated_blast_records: featlist = [] for x in blast_record[0].alignments: for y in x.hsps: featlist.append([individual_sequence.id, x.title, y.sbjct_start, y.sbjct_end, y.positives]) blast_record.append(featlist) master_featlist.append(annotated_blast_records[:]) print(individual_sequence.id) #master_featlist_file = open("master_featlist.csv", "w") #for item in master_featlist: # master_featlist_file.write("{0} \n".format(item)) #master_featlist_file.close() import cPickle as pickle with open('master_featlist_file.pkl', 'wb') as output: pickle.dump(master_featlist, output, -1) output.close() with open('master_featlist_file.pkl', 'rb') as input: master_featlist = pickle.load(input) """ This probably isn't useful now. # Figure out the target yield as a function of tolerance of mismatches specificity_vs_tolerance = [] for i in range(300)[1:]: specificity_vs_tolerance.append(len(blast_records_list) - len([item[0:3] for item in matchlist if item[5] == i])) # Returns the number of targets with more than i hits in the genome # Can you figure out the lengths of the scaffolds, badness plot and dynamically choose a cutoff? offtgt20 = [item[0:3] for item in matchlist if item[5] == 20] offtgt25 = [item[0:3] for item in matchlist if item[5] == 25] offtgt50 = [item[0:3] for item in matchlist if item[5] == 50] offtgt100 = [item[0:3] for item in matchlist if item[5] == 100] # Make a database matching target to its own off-target hits list: ## The NAME attribute of the twentymers list that was the input to FILE ## would have been useful here, but it was lost when output to FASTA and sent to BLAST import string matchlist_with_tgt_abs_locations = matchlist[:] for item in matchlist_with_tgt_abs_locations: item_relative_loc_within_amp = string.find(str(item[2]), str(individual_sequence.seq)) itemloc = tuple(int(item[0]) + int(string.find(str(item[2]), str(individual_sequence.seq)))) item = item + tuple(int(item[0]) + int(string.find(str(item[2]), str(individual_sequence.seq)))) """ """ Guess I need two things: for each cutoff, figure out what the targeting plot looks like (can we make a metric?) AND figure out the number of targetable regions on longscaffold after masking. 
Going to need to: - generate a cut=id (and sequence) scaffold hits list to refer to later: DONE - this is master_featlist - for each offtgt amp of a cutoff: mask on offtgts -DONE (amps144_masked_cutoff) generate new amps, top 144 -DONE - for new amps: re-cut make cut ids that can be matched to already-made cut id scaffold hits list do specificity analysis() """ #cutoffsrange = range(5, 201, 10) # Testing a range from 5 to 100 in steps of 5 cutoffsrange = [] expgenerator = (2**exp for exp in range(1, 11)) for n in expgenerator: print(n) cutoffsrange.append(n) replstr = "N" * 20 targets_per_threshold = [] amps144_300_current_cutoff = [] for cutoff in cutoffsrange: # generate a masked amps144 for a given offtarget threshold amps_144_300_current_cutoff_masked = [] offtgt_summary = [] for superitem in master_featlist: #because it's split into the original amps144 amps at a top level for twmer in superitem: # first, pick out the targets that are below threshold n_target_hits = len([subitem for subitem in [item[4] for item in twmer[-1]] if subitem > 17]) # this gets the count of hits for each target (over 18 nt matches) if n_target_hits > cutoff: # format of offtarget_summary: item, originating amp, position on originating amp offtgt_summary.append(((twmer[1:-1], int(twmer[-1:][0][0][0]), int(twmer[1][1]) - int(twmer[-1:][0][0][0])))) # if the count of hits is greater than the cutoff, add the item to a new list for masking. # Find the locations of poorly-specific seqs and mask them amps144_300_current_cutoff = amps144_300[:] for amp in amps144_300_current_cutoff: amp = SeqRecord(seq=amp.seq.tomutable(), id = amp.id) for tgt in offtgt_summary: if int(amp.id) == tgt[-2]: amp.seq[int(tgt[-1])-1:int(tgt[-1])+19] = replstr amps_144_300_current_cutoff_masked.append(amp) with open('amps144_300_masked_cutoff'+str(cutoff)+'.pkl', 'wb') as output: pickle.dump(amps_144_300_current_cutoff_masked, output, -1) output.close() # next, use the items in cutoff_summary to generate individual sequences out amps144 using seprepseqN() amps144_300_masked_cutoff_splitonNs = [] for item in amps_144_300_current_cutoff_masked: split = seprepseq.seprepseqN(item) #altered seprepseq to mask on Ns instead of lwrcase for x in split: x.id = int(item.id) + int(x.id) # haven't totally checked that ids are base-pair accurate, but they should be close amps144_300_masked_cutoff_splitonNs.append(split) amps144_300_masked_cutoff_splitonNs = [item for sublist in amps144_300_masked_cutoff_splitonNs for item in sublist] # Flatten the nested list amps144_300_masked_cutoff_splitonNs.sort(cmp=lambda x,y: cmp(len(y),len(x))) # Generate a sort... 
somehow #specific_amps144 = offtgt_mask_split_results[0:144] # Don't just pick longest # Here, we find the target density for each of 300 amps and sort by that: for item in amps144_300_masked_cutoff_splitonNs: cuts = [ScrFI.search(item.seq)] cuts.append(list(HpaII.search(item.seq))) cuts.append(list(BfaI.search(item.seq))) cuts = [t for sublist in cuts for t in sublist] cuts.sort() # This seems to be good code for removing dupes from a list, or nearby numbers from a list # Here, it's being used to prevent dupes from increasing apparent target density on amps result = [] num= 0 for i in cuts: if abs(i-num)<20: num=i else: result.append(i) num=i cuts = result[:] amp_tgt_count = len(cuts) * 2 item.description = amp_tgt_count amps144_300_masked_cutoff_splitonNs = sorted(amps144_300_masked_cutoff_splitonNs, key=lambda x: x.description) specific_amps144 = amps144_300_masked_cutoff_splitonNs[-144:] # Write out the best amps for item in specific_amps144: #gb needs the ids to be strings... item.id=str(item.id) item.description=str(item.description) # specific_amps144 is now ready to be written as gb... # This part just determines the number of targets in a given set of SeqRecords total_strategy_targets = 0 for item in specific_amps144: cuts = [ScrFI.search(item.seq)] cuts.append(list(HpaII.search(item.seq))) cuts.append(list(BfaI.search(item.seq))) cuts = [t for sublist in cuts for t in sublist] cuts.sort() # This seems to be good code for removing dupes from a list, or nearby numbers from a list result = [] num= 0 for i in cuts: if abs(i-num)<3: num=i else: result.append(i) num=i cuts = result[:] amp_tgt_count = len(cuts) * 2 total_strategy_targets = total_strategy_targets + amp_tgt_count print(str("There will be {0} total targets with this set of amplicons".format(total_strategy_targets))) targets_per_threshold.append((cutoff, total_strategy_targets)) #this doesn't take into account specificity, just target yield with open('secondarily_cut_amps_using_cutoff_'+str(cutoff)+'.pkl', 'wb') as output: pickle.dump(specific_amps144, output, -1) output.close() targets_per_threshold """ END CODE DESIGNED TO DETERMINE THE TOTAL LABELING AT EACH CUTOFF, WHEN CONSTRAINED TO 144 AMPS """ """ - for new amps: re-cut make cut ids that can be matched to already-made cut id scaffold hits list do specificity analysis() - almost done! next(weds): get the hits dictionary for all amps. there's something wrong with the hits dict now, though, so it seems to have 50% of the hits be a single locus... probably writing out the hit name wrong. this will have upstream effects... or maybe it was just that there were a lot of BfaI sites that were all the same, and they come out to hit the same spot? so, this is going to under-count when a locus repeats: is it only going to get the first place the hit? it shouldn't... 
""" from Bio.SeqFeature import SeqFeature, FeatureLocation for cutoff in cutoffsrange: ##pick up from here; make loop of below for all iters with open('secondarily_cut_amps_using_cutoff_'+str(cutoff)+'.pkl', 'rb') as input: specific_amps144 = pickle.load(input) longhitlist = [] for item in specific_amps144: paired_cuts_list =[] # Remove duplicate BLAST hits, since no one BLAST locus can be labeled twice # First, generate the string to be checked for duplicates (Scaffold, start_match, end_match) cuts = [] if len(ScrFI.search(item.seq)) > 0: cuts.append(list((ScrFI.search(item.seq), "ScrFI"))) if len(HpaII.search(item.seq)) > 0: cuts.append(list((HpaII.search(item.seq), "HpaII"))) if len(BfaI.search(item.seq)) > 0: cuts.append(list((BfaI.search(item.seq), "BfaI"))) #cuts = [t for sublist in cuts for t in sublist] for c in cuts: for u in c[0]: paired_cuts_list.append((u, c[-1])) paired_cuts_list.sort() # This seems to be good code for removing dupes from a list, or nearby numbers from a list # Next: have cut sites in amps. Using the correct adjustments, add them as seqfeatures absolute_locations_adjusted = [] i = 0 for m in paired_cuts_list: if i < len(paired_cuts_list)-1: if m[1] == "ScrFI": #offset here (e.g. 1:21 is for simulating MBN digeston) # because the entry.names are rooted on the right of each fragment, the length # of the entry.name has to be subtracted to get the desired left position for the "reverse" # tgts absolute_locations_adjusted.append(((int(item.id) + 1 + m[0], m[1]), "rev")) else: # Should work for HpaII/BfaI absolute_locations_adjusted.append(((int(item.id) + 2 + m[0], m[1]), "rev")) absolute_locations_adjusted.append(((int(item.id) + int(paired_cuts_list[i+1][0]) - 20, m[1]), "fwd")) # amp start + next cut point(i+1) - 2 i = i + 1 # hitlist = [] # for item in absolute_locations_adjusted: # for blast_result in master_featlist: # for subblast in blast_result: # if subblast[1][1] == item[0][0]: # scaffold_hits = [y for y in subblast[-1] if y[-1] > 17] # hitlist.append((item, \ # len([subitem for subitem in \ # [z[4] for z in subblast[-1]] if subitem > 17]), \ # [(x[1][:-19], x[2], x[3], x[4]) for x in scaffold_hits])) for v in absolute_locations_adjusted: for blast_result in master_featlist: for subblast in blast_result: if subblast[1][1] == v[0][0]: scaffold_hits = [y for y in subblast[-1] if y[-1] > 17] hitlist.append((v, \ len([subitem for subitem in \ [z[4] for z in subblast[-1]] if subitem > 17]), \ [(x[1][:-19], x[2], x[3], x[4]) for x in scaffold_hits])) # Ok, so: have a hitlist with info for each 20mer on its enzyme site, # direction, number of 18+mer hits and the scaffold locations of all hits # Next: extract scaffolds only and deduplicate hitlist_flat = [k for k in [h[-1] for h in [j for j in hitlist]]] from itertools import chain hitlist_flat = list(chain(*hitlist_flat)) longhitlist.append(list(zip(hitlist_flat, [0] * len(hitlist_flat)))) longhitlist_flat = list(chain(*longhitlist)) master_hitlist_dedup = dict(longhitlist_flat) x = 0 for x in longhitlist_flat: # This increments the hitlist_dict for each item in the flattened hitlist master_hitlist_dedup[x[0]] += 1 # Have: list of hits per locus. Don't really care about that because each locus can't be labeled twice, but it had to be deduplicated in this way. 
# Next, need to count the occurrence of each scaffold in the list deduped_on_scaffold = master_hitlist_dedup.copy() master_hitlist_dedup_list = deduped_on_scaffold.items() master_hitlist_dedup_list = [(x[0][0], 0) for x in master_hitlist_dedup_list] #Need instances of scaffold... deduped_on_scaffold = dict(master_hitlist_dedup_list) for x in master_hitlist_dedup_list: # This increments the hitlist_dict for each item in the flattened hitlist deduped_on_scaffold[x[0]] = deduped_on_scaffold[x[0]] + 1 #Write out the match hits; remember to call matchlistfile.close() or random shit happens! deduped_on_scaffold_list = deduped_on_scaffold.items() #otherwise just the key, not the value, gets output deduped_on_scaffold_file = open("deduped_on_scaffold_"+str(cutoff)+".csv", "w") for d in deduped_on_scaffold_list: deduped_on_scaffold_file.write("{0} \n".format(d)) deduped_on_scaffold_file.close() # Next up: collate the info (remove annoying parentheses etc) import csv scaffhits = [] with open("deduped_on_scaffold_"+str(cutoff)+".csv",'rb') as csvin: csvin = csv.reader(csvin, delimiter=',') for row in csvin: scaff = row scaffhits.append(scaff) for n in scaffhits: n[0] = n[0][3:-1] n[1] = int(n[1][1:-2]) n = str(n[0] + "," + str(n[1])) deduped_on_scaffold_file = open("deduped_on_scaffold_"+str(cutoff)+".csv", "w") for d in scaffhits: deduped_on_scaffold_file.write("{0} \n".format(d)) deduped_on_scaffold_file.close() """ END CODE RELATED TO *ANALYSIS* OF OFF TARGET HITS (RATHER THAN REFINEMENT OF AMPLICONS) """
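# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original analysis script: the two
# dictionary-increment passes above (dedup hits, then count per scaffold)
# can also be expressed with collections.Counter. `hit_tuples` is a
# hypothetical flat list of (scaffold, start, end, positives) tuples like
# the ones gathered into hitlist_flat.
def _count_hits_per_scaffold(hit_tuples):
    """Deduplicate exact (scaffold, start, end) hits, then count unique hits per scaffold."""
    from collections import Counter
    unique_hits = set((h[0], h[1], h[2]) for h in hit_tuples)
    return Counter(scaffold for scaffold, _start, _end in unique_hits)
# Example: _count_hits_per_scaffold([("Scaffold1", 10, 30, 20),
#                                    ("Scaffold1", 10, 30, 20),
#                                    ("Scaffold2", 5, 25, 19)])
# returns Counter({'Scaffold1': 1, 'Scaffold2': 1}).
# ---------------------------------------------------------------------------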
eatingcrispr/VirtualEating
archive/Analysis of E. coli library/ResultsAgainstExpected.py
Python
apache-2.0
18,751
[ "BLAST" ]
8c73209efc1ab57b8dc67a5e5d8bda75ddf946221d835a9b324109d276d184c2
######################################################################## # $HeadURL$ ######################################################################## """ TaskQueueDB class is a front-end to the task queues db """ __RCSID__ = "ebed3a8 (2012-07-06 20:33:11 +0200) Adri Casajs <adria@ecm.ub.es>" import types import random from DIRAC import gConfig, gLogger, S_OK, S_ERROR from DIRAC.WorkloadManagementSystem.private.SharesCorrector import SharesCorrector from DIRAC.WorkloadManagementSystem.private.Queues import maxCPUSegments from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations from DIRAC.Core.Utilities import List from DIRAC.Core.Utilities.DictCache import DictCache from DIRAC.Core.Base.DB import DB from DIRAC.Core.Security import Properties, CS DEFAULT_GROUP_SHARE = 1000 TQ_MIN_SHARE = 0.001 class TaskQueueDB( DB ): def __init__( self, maxQueueSize = 10 ): random.seed() DB.__init__( self, 'TaskQueueDB', 'WorkloadManagement/TaskQueueDB', maxQueueSize ) self.__multiValueDefFields = ( 'Sites', 'GridCEs', 'GridMiddlewares', 'BannedSites', 'Platforms', 'PilotTypes', 'SubmitPools', 'JobTypes', 'Tags' ) self.__multiValueMatchFields = ( 'GridCE', 'Site', 'GridMiddleware', 'Platform', 'PilotType', 'SubmitPool', 'JobType', 'Tag' ) self.__tagMatchFields = ( 'Tag', ) self.__bannedJobMatchFields = ( 'Site', ) self.__strictRequireMatchFields = ( 'SubmitPool', 'Platform', 'PilotType', 'Tag' ) self.__singleValueDefFields = ( 'OwnerDN', 'OwnerGroup', 'Setup', 'CPUTime' ) self.__mandatoryMatchFields = ( 'Setup', 'CPUTime' ) self.__priorityIgnoredFields = ( 'Sites', 'BannedSites' ) self.__maxJobsInTQ = 5000 self.__defaultCPUSegments = maxCPUSegments self.__maxMatchRetry = 3 self.__jobPriorityBoundaries = ( 0.001, 10 ) self.__groupShares = {} self.__deleteTQWithDelay = DictCache( self.__deleteTQIfEmpty ) self.__opsHelper = Operations() self.__ensureInsertionIsSingle = False self.__sharesCorrector = SharesCorrector( self.__opsHelper ) result = self.__initializeDB() if not result[ 'OK' ]: raise Exception( "Can't create tables: %s" % result[ 'Message' ] ) def enableAllTaskQueues( self ): """ Enable all Task queues """ return self.updateFields( "tq_TaskQueues", updateDict = { "Enabled" :"1" } ) def findOrphanJobs( self ): """ Find jobs that are not in any task queue """ result = self._query( "select JobID from tq_Jobs WHERE TQId not in (SELECT TQId from tq_TaskQueues)" ) if not result[ 'OK' ]: return result return S_OK( [ row[0] for row in result[ 'Value' ] ] ) def isSharesCorrectionEnabled( self ): return self.__getCSOption( "EnableSharesCorrection", False ) def getSingleValueTQDefFields( self ): return self.__singleValueDefFields def getMultiValueTQDefFields( self ): return self.__multiValueDefFields def getMultiValueMatchFields( self ): return self.__multiValueMatchFields def __getCSOption( self, optionName, defValue ): return self.__opsHelper.getValue( "JobScheduling/%s" % optionName, defValue ) def getPrivatePilots( self ): return self.__getCSOption( "PrivatePilotTypes", [ 'private' ] ) def getValidPilotTypes( self ): return self.__getCSOption( "AllPilotTypes", [ 'private' ] ) def __initializeDB( self ): """ Create the tables """ result = self._query( "show tables" ) if not result[ 'OK' ]: return result tablesInDB = [ t[0] for t in result[ 'Value' ] ] tablesToCreate = {} self.__tablesDesc = {} self.__tablesDesc[ 'tq_TaskQueues' ] = { 'Fields' : { 'TQId' : 'INTEGER UNSIGNED AUTO_INCREMENT NOT NULL', 'OwnerDN' : 'VARCHAR(255) NOT NULL', 'OwnerGroup' : 'VARCHAR(32) NOT NULL', 'Setup' : 
'VARCHAR(32) NOT NULL', 'CPUTime' : 'BIGINT UNSIGNED NOT NULL', 'Priority' : 'FLOAT NOT NULL', 'Enabled' : 'TINYINT(1) NOT NULL DEFAULT 0' }, 'PrimaryKey' : 'TQId', 'Indexes': { 'TQOwner': [ 'OwnerDN', 'OwnerGroup', 'Setup', 'CPUTime' ] } } self.__tablesDesc[ 'tq_Jobs' ] = { 'Fields' : { 'TQId' : 'INTEGER UNSIGNED NOT NULL', 'JobId' : 'INTEGER UNSIGNED NOT NULL', 'Priority' : 'INTEGER UNSIGNED NOT NULL', 'RealPriority' : 'FLOAT NOT NULL' }, 'PrimaryKey' : 'JobId', 'Indexes': { 'TaskIndex': [ 'TQId' ] }, } for multiField in self.__multiValueDefFields: tableName = 'tq_TQTo%s' % multiField self.__tablesDesc[ tableName ] = { 'Fields' : { 'TQId' : 'INTEGER UNSIGNED NOT NULL', 'Value' : 'VARCHAR(64) NOT NULL' }, 'Indexes': { 'TaskIndex': [ 'TQId' ], '%sIndex' % multiField: [ 'Value' ] }, } for tableName in self.__tablesDesc: if not tableName in tablesInDB: tablesToCreate[ tableName ] = self.__tablesDesc[ tableName ] return self._createTables( tablesToCreate ) def getGroupsInTQs( self ): cmdSQL = "SELECT DISTINCT( OwnerGroup ) FROM `tq_TaskQueues`" result = self._query( cmdSQL ) if not result[ 'OK' ]: return result return S_OK( [ row[0] for row in result[ 'Value' ] ] ) def forceRecreationOfTables( self ): dropSQL = "DROP TABLE IF EXISTS %s" % ", ".join( self.__tablesDesc ) result = self._update( dropSQL ) if not result[ 'OK' ]: return result return self._createTables( self.__tablesDesc ) def __strDict( self, dDict ): lines = [] for key in sorted( dDict ): lines.append( " %s" % key ) value = dDict[ key ] if type( value ) in ( types.ListType, types.TupleType ): lines.extend( [ " %s" % v for v in value ] ) else: lines.append( " %s" % str( value ) ) return "{\n%s\n}" % "\n".join( lines ) def fitCPUTimeToSegments( self, cpuTime ): """ Fit the CPU time to the valid segments """ maxCPUSegments = self.__getCSOption( "taskQueueCPUTimeIntervals", self.__defaultCPUSegments ) try: maxCPUSegments = [ int( seg ) for seg in maxCPUSegments ] #Check segments in the CS last = 0 for cpuS in maxCPUSegments: if cpuS <= last: maxCPUSegments = self.__defaultCPUSegments break last = cpuS except: maxCPUSegments = self.__defaultCPUSegments #Map to a segment for iP in range( len( maxCPUSegments ) ): cpuSegment = maxCPUSegments[ iP ] if cpuTime <= cpuSegment: return cpuSegment return maxCPUSegments[-1] def _checkTaskQueueDefinition( self, tqDefDict ): """ Check a task queue definition dict is valid """ # Confine the LHCbPlatform legacy option here, use Platform everywhere else # until the LHCbPlatform is no more used in the TaskQueueDB if 'LHCbPlatforms' in tqDefDict and not "Platforms" in tqDefDict: tqDefDict['Platforms'] = tqDefDict['LHCbPlatforms'] if 'SystemConfigs' in tqDefDict and not "Platforms" in tqDefDict: tqDefDict['Platforms'] = tqDefDict['SystemConfigs'] for field in self.__singleValueDefFields: if field not in tqDefDict: return S_ERROR( "Missing mandatory field '%s' in task queue definition" % field ) fieldValueType = type( tqDefDict[ field ] ) if field in [ "CPUTime" ]: if fieldValueType not in ( types.IntType, types.LongType ): return S_ERROR( "Mandatory field %s value type is not valid: %s" % ( field, fieldValueType ) ) else: if fieldValueType not in ( types.StringType, types.UnicodeType ): return S_ERROR( "Mandatory field %s value type is not valid: %s" % ( field, fieldValueType ) ) result = self._escapeString( tqDefDict[ field ] ) if not result[ 'OK' ]: return result tqDefDict[ field ] = result[ 'Value' ] for field in self.__multiValueDefFields: if field not in tqDefDict: continue fieldValueType = type( 
tqDefDict[ field ] ) if fieldValueType not in ( types.ListType, types.TupleType ): return S_ERROR( "Multi value field %s value type is not valid: %s" % ( field, fieldValueType ) ) result = self._escapeValues( tqDefDict[ field ] ) if not result[ 'OK' ]: return result tqDefDict[ field ] = result[ 'Value' ] #FIXME: This is not used if 'PrivatePilots' in tqDefDict: validPilotTypes = self.getValidPilotTypes() for pilotType in tqDefDict[ 'PrivatePilots' ]: if pilotType not in validPilotTypes: return S_ERROR( "PilotType %s is invalid" % pilotType ) return S_OK( tqDefDict ) def _checkMatchDefinition( self, tqMatchDict ): """ Check a task queue match dict is valid """ def travelAndCheckType( value, validTypes, escapeValues = True ): valueType = type( value ) if valueType in ( types.ListType, types.TupleType ): for subValue in value: subValueType = type( subValue ) if subValueType not in validTypes: return S_ERROR( "List contained type %s is not valid -> %s" % ( subValueType, validTypes ) ) if escapeValues: return self._escapeValues( value ) return S_OK( value ) else: if valueType not in validTypes: return S_ERROR( "Type %s is not valid -> %s" % ( valueType, validTypes ) ) if escapeValues: return self._escapeString( value ) return S_OK( value ) # Confine the LHCbPlatform legacy option here, use Platform everywhere else # until the LHCbPlatform is no more used in the TaskQueueDB if 'LHCbPlatform' in tqMatchDict and not "Platform" in tqMatchDict: tqMatchDict['Platform'] = tqMatchDict['LHCbPlatform'] if 'SystemConfig' in tqMatchDict and not "Platform" in tqMatchDict: tqMatchDict['Platform'] = tqMatchDict['SystemConfig'] for field in self.__singleValueDefFields: if field not in tqMatchDict: if field in self.__mandatoryMatchFields: return S_ERROR( "Missing mandatory field '%s' in match request definition" % field ) continue fieldValue = tqMatchDict[ field ] if field in [ "CPUTime" ]: result = travelAndCheckType( fieldValue, ( types.IntType, types.LongType ), escapeValues = False ) else: result = travelAndCheckType( fieldValue, ( types.StringType, types.UnicodeType ) ) if not result[ 'OK' ]: return S_ERROR( "Match definition field %s failed : %s" % ( field, result[ 'Message' ] ) ) tqMatchDict[ field ] = result[ 'Value' ] #Check multivalue for multiField in self.__multiValueMatchFields: for field in ( multiField, "Banned%s" % multiField ): if field in tqMatchDict: fieldValue = tqMatchDict[ field ] result = travelAndCheckType( fieldValue, ( types.StringType, types.UnicodeType ) ) if not result[ 'OK' ]: return S_ERROR( "Match definition field %s failed : %s" % ( field, result[ 'Message' ] ) ) tqMatchDict[ field ] = result[ 'Value' ] return S_OK( tqMatchDict ) def __createTaskQueue( self, tqDefDict, priority = 1, connObj = False ): """ Create a task queue Returns S_OK( tqId ) / S_ERROR """ if not connObj: result = self._getConnection() if not result[ 'OK' ]: return S_ERROR( "Can't create task queue: %s" % result[ 'Message' ] ) connObj = result[ 'Value' ] tqDefDict[ 'CPUTime' ] = self.fitCPUTimeToSegments( tqDefDict[ 'CPUTime' ] ) sqlSingleFields = [ 'TQId', 'Priority' ] sqlValues = [ "0", str( priority ) ] for field in self.__singleValueDefFields: sqlSingleFields.append( field ) sqlValues.append( tqDefDict[ field ] ) #Insert the TQ Disabled sqlSingleFields.append( "Enabled" ) sqlValues.append( "0" ) cmd = "INSERT INTO tq_TaskQueues ( %s ) VALUES ( %s )" % ( ", ".join( sqlSingleFields ), ", ".join( [ str( v ) for v in sqlValues ] ) ) result = self._update( cmd, conn = connObj ) if not result[ 'OK' ]: 
self.log.error( "Can't insert TQ in DB", result[ 'Value' ] ) return result if 'lastRowId' in result: tqId = result['lastRowId'] else: result = self._query( "SELECT LAST_INSERT_ID()", conn = connObj ) if not result[ 'OK' ]: self.cleanOrphanedTaskQueues( connObj = connObj ) return S_ERROR( "Can't determine task queue id after insertion" ) tqId = result[ 'Value' ][0][0] for field in self.__multiValueDefFields: if field not in tqDefDict: continue values = List.uniqueElements( [ value for value in tqDefDict[ field ] if value.strip() ] ) if not values: continue cmd = "INSERT INTO `tq_TQTo%s` ( TQId, Value ) VALUES " % field cmd += ", ".join( [ "( %s, %s )" % ( tqId, str( value ) ) for value in values ] ) result = self._update( cmd, conn = connObj ) if not result[ 'OK' ]: self.log.error( "Failed to insert %s condition" % field, result[ 'Message' ] ) self.cleanOrphanedTaskQueues( connObj = connObj ) return S_ERROR( "Can't insert values %s for field %s: %s" % ( str( values ), field, result[ 'Message' ] ) ) self.log.info( "Created TQ %s" % tqId ) return S_OK( tqId ) def cleanOrphanedTaskQueues( self, connObj = False ): """ Delete all empty task queues """ self.log.info( "Cleaning orphaned TQs" ) result = self._update( "DELETE FROM `tq_TaskQueues` WHERE Enabled >= 1 AND TQId not in ( SELECT DISTINCT TQId from `tq_Jobs` )", conn = connObj ) if not result[ 'OK' ]: return result for mvField in self.__multiValueDefFields: result = self._update( "DELETE FROM `tq_TQTo%s` WHERE TQId not in ( SELECT DISTINCT TQId from `tq_TaskQueues` )" % mvField, conn = connObj ) if not result[ 'OK' ]: return result return S_OK() def __setTaskQueueEnabled( self, tqId, enabled = True, connObj = False ): if enabled: enabled = "+ 1" else: enabled = "- 1" upSQL = "UPDATE `tq_TaskQueues` SET Enabled = Enabled %s WHERE TQId=%d" % ( enabled, tqId ) result = self._update( upSQL, conn = connObj ) if not result[ 'OK' ]: self.log.error( "Error setting TQ state", "TQ %s State %s: %s" % ( tqId, enabled, result[ 'Message' ] ) ) return result updated = result['Value'] > 0 if updated: self.log.info( "Set enabled = %s for TQ %s" % ( enabled, tqId ) ) return S_OK( updated ) def __hackJobPriority( self, jobPriority ): jobPriority = min( max( int( jobPriority ), self.__jobPriorityBoundaries[0] ), self.__jobPriorityBoundaries[1] ) if jobPriority == self.__jobPriorityBoundaries[0]: return 10 ** ( -5 ) if jobPriority == self.__jobPriorityBoundaries[1]: return 10 ** 6 return jobPriority def insertJob( self, jobId, tqDefDict, jobPriority, skipTQDefCheck = False, numRetries = 10 ): """ Insert a job in a task queue Returns S_OK( tqId ) / S_ERROR """ try: test = long( jobId ) except: return S_ERROR( "JobId is not a number!" 
) retVal = self._getConnection() if not retVal[ 'OK' ]: return S_ERROR( "Can't insert job: %s" % retVal[ 'Message' ] ) connObj = retVal[ 'Value' ] if not skipTQDefCheck: tqDefDict = dict( tqDefDict ) retVal = self._checkTaskQueueDefinition( tqDefDict ) if not retVal[ 'OK' ]: self.log.error( "TQ definition check failed", retVal[ 'Message' ] ) return retVal tqDefDict = retVal[ 'Value' ] tqDefDict[ 'CPUTime' ] = self.fitCPUTimeToSegments( tqDefDict[ 'CPUTime' ] ) self.log.info( "Inserting job %s with requirements: %s" % ( jobId, self.__strDict( tqDefDict ) ) ) retVal = self.__findAndDisableTaskQueue( tqDefDict, skipDefinitionCheck = True, connObj = connObj ) if not retVal[ 'OK' ]: return retVal tqInfo = retVal[ 'Value' ] newTQ = False if not tqInfo[ 'found' ]: self.log.info( "Creating a TQ for job %s" % jobId ) retVal = self.__createTaskQueue( tqDefDict, 1, connObj = connObj ) if not retVal[ 'OK' ]: return retVal tqId = retVal[ 'Value' ] newTQ = True else: tqId = tqInfo[ 'tqId' ] self.log.info( "Found TQ %s for job %s requirements" % ( tqId, jobId ) ) try: result = self.__insertJobInTaskQueue( jobId, tqId, int( jobPriority ), checkTQExists = False, connObj = connObj ) if not result[ 'OK' ]: self.log.error( "Error inserting job in TQ", "Job %s TQ %s: %s" % ( jobId, tqId, result[ 'Message' ] ) ) return result if newTQ: self.recalculateTQSharesForEntity( tqDefDict[ 'OwnerDN' ], tqDefDict[ 'OwnerGroup' ], connObj = connObj ) finally: self.__setTaskQueueEnabled( tqId, True ) return S_OK() def __insertJobInTaskQueue( self, jobId, tqId, jobPriority, checkTQExists = True, connObj = False ): """ Insert a job in a given task queue """ self.log.info( "Inserting job %s in TQ %s with priority %s" % ( jobId, tqId, jobPriority ) ) if not connObj: result = self._getConnection() if not result[ 'OK' ]: return S_ERROR( "Can't insert job: %s" % result[ 'Message' ] ) connObj = result[ 'Value' ] if checkTQExists: result = self._query( "SELECT tqId FROM `tq_TaskQueues` WHERE TQId = %s" % tqId, conn = connObj ) if not result[ 'OK' ] or len ( result[ 'Value' ] ) == 0: return S_OK( "Can't find task queue with id %s: %s" % ( tqId, result[ 'Message' ] ) ) hackedPriority = self.__hackJobPriority( jobPriority ) result = self._update( "INSERT INTO tq_Jobs ( TQId, JobId, Priority, RealPriority ) VALUES ( %s, %s, %s, %f ) ON DUPLICATE KEY UPDATE TQId = %s, Priority = %s, RealPriority = %f" % ( tqId, jobId, jobPriority, hackedPriority, tqId, jobPriority, hackedPriority ), conn = connObj ) if not result[ 'OK' ]: return result return S_OK() def __generateTQFindSQL( self, tqDefDict, skipDefinitionCheck = False, connObj = False ): """ Find a task queue that has exactly the same requirements """ if not skipDefinitionCheck: tqDefDict = dict( tqDefDict ) result = self._checkTaskQueueDefinition( tqDefDict ) if not result[ 'OK' ]: return result tqDefDict = result[ 'Value' ] sqlCondList = [] for field in self.__singleValueDefFields: sqlCondList.append( "`tq_TaskQueues`.%s = %s" % ( field, tqDefDict[ field ] ) ) #MAGIC SUBQUERIES TO ENSURE STRICT MATCH for field in self.__multiValueDefFields: tableName = '`tq_TQTo%s`' % field if field in tqDefDict and tqDefDict[ field ]: firstQuery = "SELECT COUNT(%s.Value) FROM %s WHERE %s.TQId = `tq_TaskQueues`.TQId" % ( tableName, tableName, tableName ) grouping = "GROUP BY %s.TQId" % tableName valuesList = List.uniqueElements( [ value.strip() for value in tqDefDict[ field ] if value.strip() ] ) numValues = len( valuesList ) secondQuery = "%s AND %s.Value in (%s)" % ( firstQuery, tableName, ",".join( 
[ "%s" % str( value ) for value in valuesList ] ) ) sqlCondList.append( "%s = (%s %s)" % ( numValues, firstQuery, grouping ) ) sqlCondList.append( "%s = (%s %s)" % ( numValues, secondQuery, grouping ) ) else: sqlCondList.append( "`tq_TaskQueues`.TQId not in ( SELECT DISTINCT %s.TQId from %s )" % ( tableName, tableName ) ) #END MAGIC: That was easy ;) return S_OK( " AND ".join( sqlCondList ) ) def __findAndDisableTaskQueue( self, tqDefDict, skipDefinitionCheck = False, retries = 10, connObj = False ): """ Disable and find TQ """ for i in range( retries ): result = self.__findSmallestTaskQueue( tqDefDict, skipDefinitionCheck = skipDefinitionCheck, connObj = connObj ) if not result[ 'OK' ]: return result data = result[ 'Value' ] if not data[ 'found' ]: return result if data[ 'enabled' ] < 1: gLogger.notice( "TaskQueue {tqId} seems to be already disabled ({enabled})".format( **data ) ) result = self.__setTaskQueueEnabled( data[ 'tqId' ], False ) if result[ 'OK' ]: return S_OK( data ) return S_ERROR( "Could not disable TQ" ) def __findSmallestTaskQueue( self, tqDefDict, skipDefinitionCheck = False, connObj = False ): """ Find a task queue that has exactly the same requirements """ result = self.__generateTQFindSQL( tqDefDict, skipDefinitionCheck = skipDefinitionCheck, connObj = connObj ) if not result[ 'OK' ]: return result sqlCmd = "SELECT COUNT( `tq_Jobs`.JobID ), `tq_TaskQueues`.TQId, `tq_TaskQueues`.Enabled FROM `tq_TaskQueues`, `tq_Jobs`" sqlCmd = "%s WHERE `tq_TaskQueues`.TQId = `tq_Jobs`.TQId AND %s GROUP BY `tq_Jobs`.TQId ORDER BY COUNT( `tq_Jobs`.JobID ) ASC" % ( sqlCmd, result[ 'Value' ] ) result = self._query( sqlCmd, conn = connObj ) if not result[ 'OK' ]: return S_ERROR( "Can't find task queue: %s" % result[ 'Message' ] ) data = result[ 'Value' ] if len( data ) == 0 or data[0][0] >= self.__maxJobsInTQ: return S_OK( { 'found' : False } ) return S_OK( { 'found' : True, 'tqId' : data[0][1], 'enabled' : data[0][2], 'jobs' : data[0][0] } ) def matchAndGetJob( self, tqMatchDict, numJobsPerTry = 50, numQueuesPerTry = 10, negativeCond = {} ): """ Match a job """ #Make a copy to avoid modification of original if escaping needs to be done tqMatchDict = dict( tqMatchDict ) self.log.info( "Starting match for requirements", self.__strDict( tqMatchDict ) ) retVal = self._checkMatchDefinition( tqMatchDict ) if not retVal[ 'OK' ]: self.log.error( "TQ match request check failed", retVal[ 'Message' ] ) return retVal retVal = self._getConnection() if not retVal[ 'OK' ]: return S_ERROR( "Can't connect to DB: %s" % retVal[ 'Message' ] ) connObj = retVal[ 'Value' ] preJobSQL = "SELECT `tq_Jobs`.JobId, `tq_Jobs`.TQId FROM `tq_Jobs` WHERE `tq_Jobs`.TQId = %s AND `tq_Jobs`.Priority = %s" prioSQL = "SELECT `tq_Jobs`.Priority FROM `tq_Jobs` WHERE `tq_Jobs`.TQId = %s ORDER BY RAND() / `tq_Jobs`.RealPriority ASC LIMIT 1" postJobSQL = " ORDER BY `tq_Jobs`.JobId ASC LIMIT %s" % numJobsPerTry for matchTry in range( self.__maxMatchRetry ): if 'JobID' in tqMatchDict: # A certain JobID is required by the resource, so all TQ are to be considered retVal = self.matchAndGetTaskQueue( tqMatchDict, numQueuesToGet = 0, skipMatchDictDef = True, connObj = connObj ) preJobSQL = "%s AND `tq_Jobs`.JobId = %s " % ( preJobSQL, tqMatchDict['JobID'] ) else: retVal = self.matchAndGetTaskQueue( tqMatchDict, numQueuesToGet = numQueuesPerTry, skipMatchDictDef = True, negativeCond = negativeCond, connObj = connObj ) if not retVal[ 'OK' ]: return retVal tqList = retVal[ 'Value' ] if len( tqList ) == 0: self.log.info( "No TQ matches 
requirements" ) return S_OK( { 'matchFound' : False, 'tqMatch' : tqMatchDict } ) for tqId, tqOwnerDN, tqOwnerGroup in tqList: self.log.info( "Trying to extract jobs from TQ %s" % tqId ) retVal = self._query( prioSQL % tqId, conn = connObj ) if not retVal[ 'OK' ]: return S_ERROR( "Can't retrieve winning priority for matching job: %s" % retVal[ 'Message' ] ) if len( retVal[ 'Value' ] ) == 0: continue prio = retVal[ 'Value' ][0][0] retVal = self._query( "%s %s" % ( preJobSQL % ( tqId, prio ), postJobSQL ), conn = connObj ) if not retVal[ 'OK' ]: return S_ERROR( "Can't begin transaction for matching job: %s" % retVal[ 'Message' ] ) jobTQList = [ ( row[0], row[1] ) for row in retVal[ 'Value' ] ] if len( jobTQList ) == 0: gLogger.info( "Task queue %s seems to be empty, triggering a cleaning" % tqId ) self.__deleteTQWithDelay.add( tqId, 300, ( tqId, tqOwnerDN, tqOwnerGroup ) ) while len( jobTQList ) > 0: jobId, tqId = jobTQList.pop( random.randint( 0, len( jobTQList ) - 1 ) ) self.log.info( "Trying to extract job %s from TQ %s" % ( jobId, tqId ) ) retVal = self.deleteJob( jobId, connObj = connObj ) if not retVal[ 'OK' ]: msgFix = "Could not take job" msgVar = " %s out from the TQ %s: %s" % ( jobId, tqId, retVal[ 'Message' ] ) self.log.error( msgFix, msgVar ) return S_ERROR( msgFix + msgVar ) if retVal[ 'Value' ] == True : self.log.info( "Extracted job %s with prio %s from TQ %s" % ( jobId, prio, tqId ) ) return S_OK( { 'matchFound' : True, 'jobId' : jobId, 'taskQueueId' : tqId, 'tqMatch' : tqMatchDict } ) self.log.info( "No jobs could be extracted from TQ %s" % tqId ) self.log.info( "Could not find a match after %s match retries" % self.__maxMatchRetry ) return S_ERROR( "Could not find a match after %s match retries" % self.__maxMatchRetry ) def matchAndGetTaskQueue( self, tqMatchDict, numQueuesToGet = 1, skipMatchDictDef = False, negativeCond = {}, connObj = False ): """ Get a queue that matches the requirements """ #Make a copy to avoid modification of original if escaping needs to be done tqMatchDict = dict( tqMatchDict ) if not skipMatchDictDef: retVal = self._checkMatchDefinition( tqMatchDict ) if not retVal[ 'OK' ]: return retVal retVal = self.__generateTQMatchSQL( tqMatchDict, numQueuesToGet = numQueuesToGet, negativeCond = negativeCond ) if not retVal[ 'OK' ]: return retVal matchSQL = retVal[ 'Value' ] retVal = self._query( matchSQL, conn = connObj ) if not retVal[ 'OK' ]: return retVal return S_OK( [ ( row[0], row[1], row[2] ) for row in retVal[ 'Value' ] ] ) def __generateSQLSubCond( self, sqlString, value, boolOp = 'OR' ): if type( value ) not in ( types.ListType, types.TupleType ): return sqlString % str( value ).strip() sqlORList = [] for v in value: sqlORList.append( sqlString % str( v ).strip() ) return "( %s )" % ( " %s " % boolOp ).join( sqlORList ) def __generateNotSQL( self, tableDict, negativeCond ): """ Generate negative conditions Can be a list of dicts or a dict: - list of dicts will be OR of conditional dicts - dicts will be normal conditional dict ( kay1 in ( v1, v2, ... ) AND key2 in ( v3, v4, ... 
) ) """ condType = type( negativeCond ) if condType in ( types.ListType, types.TupleType ): sqlCond = [] for cD in negativeCond: sqlCond.append( self.__generateNotDictSQL( tableDict, cD ) ) return " ( %s )" % " OR ".join( sqlCond ) elif condType == types.DictType: return self.__generateNotDictSQL( tableDict, negativeCond ) raise RuntimeError( "negativeCond has to be either a list or a dict and it's %s" % condType ) def __generateNotDictSQL( self, tableDict, negativeCond ): """ Generate the negative sql condition from a standard condition dict not ( cond1 and cond2 ) = ( not cond1 or not cond 2 ) For instance: { 'Site': 'S1', 'JobType': [ 'T1', 'T2' ] } ( not 'S1' in Sites or ( not 'T1' in JobType and not 'T2' in JobType ) ) S2 T1 -> not False or ( not True and not False ) -> True or ... -> True -> Eligible S1 T3 -> not True or ( not False and not False ) -> False or (True and True ) -> True -> Eligible S1 T1 -> not True or ( not True and not False ) -> False or ( False and True ) -> False -> Nop """ condList = [] for field in negativeCond: if field in self.__multiValueMatchFields: fullTableN = '`tq_TQTo%ss`' % field valList = negativeCond[ field ] if type( valList ) not in ( types.TupleType, types.ListType ): valList = ( valList, ) subList = [] for value in valList: value = self._escapeString( value )[ 'Value' ] sql = "%s NOT IN ( SELECT %s.Value FROM %s WHERE %s.TQId = tq.TQId )" % ( value, fullTableN, fullTableN, fullTableN ) subList.append( sql ) condList.append( "( %s )" % " AND ".join( subList ) ) elif field in self.__singleValueDefFields: for value in negativeCond[field]: value = self._escapeString( value )[ 'Value' ] sql = "%s != tq.%s " % ( value, field ) condList.append( sql ) return "( %s )" % " OR ".join( condList ) def __generateTablesName( self, sqlTables, field ): fullTableName = 'tq_TQTo%ss' % field if fullTableName not in sqlTables: tableN = field.lower() sqlTables[ fullTableName ] = tableN return tableN, "`%s`" % fullTableName, return sqlTables[ fullTableName ], "`%s`" % fullTableName def __generateTQMatchSQL( self, tqMatchDict, numQueuesToGet = 1, negativeCond = {} ): """ Generate the SQL needed to match a task queue """ #Only enabled TQs sqlCondList = [] sqlTables = { "tq_TaskQueues" : "tq" } #If OwnerDN and OwnerGroup are defined only use those combinations that make sense if 'OwnerDN' in tqMatchDict and 'OwnerGroup' in tqMatchDict: groups = tqMatchDict[ 'OwnerGroup' ] if type( groups ) not in ( types.ListType, types.TupleType ): groups = [ groups ] dns = tqMatchDict[ 'OwnerDN' ] if type( dns ) not in ( types.ListType, types.TupleType ): dns = [ dns ] ownerConds = [] for group in groups: if Properties.JOB_SHARING in CS.getPropertiesForGroup( group.replace( '"', "" ) ): ownerConds.append( "tq.OwnerGroup = %s" % group ) else: for dn in dns: ownerConds.append( "( tq.OwnerDN = %s AND tq.OwnerGroup = %s )" % ( dn, group ) ) sqlCondList.append( " OR ".join( ownerConds ) ) else: #If not both are defined, just add the ones that are defined for field in ( 'OwnerGroup', 'OwnerDN' ): if field in tqMatchDict: sqlCondList.append( self.__generateSQLSubCond( "tq.%s = %%s" % field, tqMatchDict[ field ] ) ) #Type of single value conditions for field in ( 'CPUTime', 'Setup' ): if field in tqMatchDict: if field in ( 'CPUTime' ): sqlCondList.append( self.__generateSQLSubCond( "tq.%s <= %%s" % field, tqMatchDict[ field ] ) ) else: sqlCondList.append( self.__generateSQLSubCond( "tq.%s = %%s" % field, tqMatchDict[ field ] ) ) #Match multi value fields for field in 
self.__multiValueMatchFields: #It has to be %ss , with an 's' at the end because the columns names # are plural and match options are singular if field in tqMatchDict and tqMatchDict[ field ]: tableN, fullTableN = self.__generateTablesName( sqlTables, field ) sqlMultiCondList = [] # if field != 'GridCE' or 'Site' in tqMatchDict: # Jobs for masked sites can be matched if they specified a GridCE # Site is removed from tqMatchDict if the Site is mask. In this case we want # that the GridCE matches explicitly so the COUNT can not be 0. In this case we skip this # condition sqlMultiCondList.append( "( SELECT COUNT(%s.Value) FROM %s WHERE %s.TQId = tq.TQId ) = 0" % ( fullTableN, fullTableN, fullTableN ) ) if field in self.__tagMatchFields: csql = self.__generateTagSQLSubCond( fullTableN, tqMatchDict[field] ) else: csql = self.__generateSQLSubCond( "%%s IN ( SELECT %s.Value FROM %s WHERE %s.TQId = tq.TQId )" % ( fullTableN, fullTableN, fullTableN ), tqMatchDict[ field ] ) sqlMultiCondList.append( csql ) sqlCondList.append( "( %s )" % " OR ".join( sqlMultiCondList ) ) #In case of Site, check it's not in job banned sites if field in self.__bannedJobMatchFields: fullTableN = '`tq_TQToBanned%ss`' % field csql = self.__generateSQLSubCond( "%%s not in ( SELECT %s.Value FROM %s WHERE %s.TQId = tq.TQId )" % ( fullTableN, fullTableN, fullTableN ), tqMatchDict[ field ], boolOp = 'OR' ) sqlCondList.append( csql ) #Resource banning bannedField = "Banned%s" % field if bannedField in tqMatchDict and tqMatchDict[ bannedField ]: fullTableN = '`tq_TQTo%ss`' % field csql = self.__generateSQLSubCond( "%%s not in ( SELECT %s.Value FROM %s WHERE %s.TQId = tq.TQId )" % ( fullTableN, fullTableN, fullTableN ), tqMatchDict[ bannedField ], boolOp = 'OR' ) sqlCondList.append( csql ) #For certain fields, the require is strict. If it is not in the tqMatchDict, the job cannot require it for field in self.__strictRequireMatchFields: if field in tqMatchDict: continue fullTableN = '`tq_TQTo%ss`' % field sqlCondList.append( "( SELECT COUNT(%s.Value) FROM %s WHERE %s.TQId = tq.TQId ) = 0" % ( fullTableN, fullTableN, fullTableN ) ) # Add extra conditions if negativeCond: sqlCondList.append( self.__generateNotSQL( sqlTables, negativeCond ) ) #Generate the final query string tqSqlCmd = "SELECT tq.TQId, tq.OwnerDN, tq.OwnerGroup FROM `tq_TaskQueues` tq WHERE %s" % ( " AND ".join( sqlCondList ) ) #Apply priorities tqSqlCmd = "%s ORDER BY RAND() / tq.Priority ASC" % tqSqlCmd #Do we want a limit? 
if numQueuesToGet: tqSqlCmd = "%s LIMIT %s" % ( tqSqlCmd, numQueuesToGet ) return S_OK( tqSqlCmd ) def __generateTagSQLSubCond( self, tableName, tagMatchList ): """ Generate SQL condition where ALL the specified multiValue requirements must be present in the matching resource list """ sql1 = "SELECT COUNT(%s.Value) FROM %s WHERE %s.TQId=tq.TQId" % ( tableName, tableName, tableName ) if type( tagMatchList ) in [types.ListType, types.TupleType]: sql2 = sql1 + " AND %s.Value in ( %s )" % ( tableName, ','.join( [ "%s" % v for v in tagMatchList] ) ) else: sql2 = sql1 + " AND %s.Value=%s" % ( tableName, tagMatchList ) sql = '( '+sql1+' ) = ('+sql2+' )' return sql def deleteJob( self, jobId, connObj = False ): """ Delete a job from the task queues Return S_OK( True/False ) / S_ERROR """ if not connObj: retVal = self._getConnection() if not retVal[ 'OK' ]: return S_ERROR( "Can't delete job: %s" % retVal[ 'Message' ] ) connObj = retVal[ 'Value' ] retVal = self._query( "SELECT t.TQId, t.OwnerDN, t.OwnerGroup FROM `tq_TaskQueues` t, `tq_Jobs` j WHERE j.JobId = %s AND t.TQId = j.TQId" % jobId, conn = connObj ) if not retVal[ 'OK' ]: return S_ERROR( "Could not get job from task queue %s: %s" % ( jobId, retVal[ 'Message' ] ) ) data = retVal[ 'Value' ] if not data: return S_OK( False ) tqId, tqOwnerDN, tqOwnerGroup = data[0] self.log.info( "Deleting job %s" % jobId ) retVal = self._update( "DELETE FROM `tq_Jobs` WHERE JobId = %s" % jobId, conn = connObj ) if not retVal[ 'OK' ]: return S_ERROR( "Could not delete job from task queue %s: %s" % ( jobId, retVal[ 'Message' ] ) ) result = retVal[ 'Value' ] if retVal[ 'Value' ] == 0: #No job deleted return S_OK( False ) retries = 10 #Always return S_OK() because job has already been taken out from the TQ self.__deleteTQWithDelay.add( tqId, 300, ( tqId, tqOwnerDN, tqOwnerGroup ) ) return S_OK( True ) def getTaskQueueForJob( self, jobId, connObj = False ): """ Return TaskQueue for a given Job Return S_OK( [TaskQueueID] ) / S_ERROR """ if not connObj: retVal = self._getConnection() if not retVal[ 'OK' ]: return S_ERROR( "Can't get TQ for job: %s" % retVal[ 'Message' ] ) connObj = retVal[ 'Value' ] retVal = self._query( 'SELECT TQId FROM `tq_Jobs` WHERE JobId = %s ' % jobId, conn = connObj ) if not retVal[ 'OK' ]: return retVal if not retVal['Value']: return S_ERROR( 'Not in TaskQueues' ) return S_OK( retVal['Value'][0][0] ) def getTaskQueueForJobs( self, jobIDs, connObj = False ): """ Return TaskQueues for a given list of Jobs """ if not connObj: retVal = self._getConnection() if not retVal[ 'OK' ]: return S_ERROR( "Can't get TQs for a job list: %s" % retVal[ 'Message' ] ) connObj = retVal[ 'Value' ] jobString = ','.join( [ str( x ) for x in jobIDs ] ) retVal = self._query( 'SELECT JobId,TQId FROM `tq_Jobs` WHERE JobId in (%s) ' % jobString, conn = connObj ) if not retVal[ 'OK' ]: return retVal if not retVal['Value']: return S_ERROR( 'Not in TaskQueues' ) resultDict = {} for jobID, TQID in retVal['Value']: resultDict[int( jobID )] = int( TQID ) return S_OK( resultDict ) def __getOwnerForTaskQueue( self, tqId, connObj = False ): retVal = self._query( "SELECT OwnerDN, OwnerGroup from `tq_TaskQueues` WHERE TQId=%s" % tqId, conn = connObj ) if not retVal[ 'OK' ]: return retVal data = retVal[ 'Value' ] if len( data ) == 0: return S_OK( False ) return S_OK( retVal[ 'Value' ][0] ) def __deleteTQIfEmpty( self, args ): ( tqId, tqOwnerDN, tqOwnerGroup ) = args retries = 3 while retries: retries -= 1 result = self.deleteTaskQueueIfEmpty( tqId, tqOwnerDN, tqOwnerGroup ) if 
result[ 'OK' ]: return gLogger.error( "Could not delete TQ %s: %s" % ( tqId, result[ 'Message' ] ) ) def deleteTaskQueueIfEmpty( self, tqId, tqOwnerDN = False, tqOwnerGroup = False, connObj = False ): """ Try to delete a task queue if it is empty """ if not connObj: retVal = self._getConnection() if not retVal[ 'OK' ]: return S_ERROR( "Can't delete task queue: %s" % retVal[ 'Message' ] ) connObj = retVal[ 'Value' ] if not tqOwnerDN or not tqOwnerGroup: retVal = self.__getOwnerForTaskQueue( tqId, connObj = connObj ) if not retVal[ 'OK' ]: return retVal data = retVal[ 'Value' ] if not data: return S_OK( False ) tqOwnerDN, tqOwnerGroup = data sqlCmd = "DELETE FROM `tq_TaskQueues` WHERE Enabled >= 1 AND `tq_TaskQueues`.TQId = %s" % tqId sqlCmd = "%s AND `tq_TaskQueues`.TQId not in ( SELECT DISTINCT TQId from `tq_Jobs` )" % sqlCmd retVal = self._update( sqlCmd, conn = connObj ) if not retVal[ 'OK' ]: return S_ERROR( "Could not delete task queue %s: %s" % ( tqId, retVal[ 'Message' ] ) ) delTQ = retVal[ 'Value' ] if delTQ > 0: for mvField in self.__multiValueDefFields: retVal = self._update( "DELETE FROM `tq_TQTo%s` WHERE TQId = %s" % ( mvField, tqId ), conn = connObj ) if not retVal[ 'OK' ]: return retVal self.recalculateTQSharesForEntity( tqOwnerDN, tqOwnerGroup, connObj = connObj ) self.log.info( "Deleted empty and enabled TQ %s" % tqId ) return S_OK( True ) return S_OK( False ) def deleteTaskQueue( self, tqId, tqOwnerDN = False, tqOwnerGroup = False, connObj = False ): """ Try to delete a task queue even if it has jobs """ self.log.info( "Deleting TQ %s" % tqId ) if not connObj: retVal = self._getConnection() if not retVal[ 'OK' ]: return S_ERROR( "Can't delete task queue: %s" % retVal[ 'Message' ] ) connObj = retVal[ 'Value' ] if not tqOwnerDN or not tqOwnerGroup: retVal = self.__getOwnerForTaskQueue( tqId, connObj = connObj ) if not retVal[ 'OK' ]: return retVal data = retVal[ 'Value' ] if not data: return S_OK( False ) tqOwnerDN, tqOwnerGroup = data sqlCmd = "DELETE FROM `tq_TaskQueues` WHERE `tq_TaskQueues`.TQId = %s" % tqId retVal = self._update( sqlCmd, conn = connObj ) if not retVal[ 'OK' ]: return S_ERROR( "Could not delete task queue %s: %s" % ( tqId, retVal[ 'Message' ] ) ) delTQ = retVal[ 'Value' ] sqlCmd = "DELETE FROM `tq_Jobs` WHERE `tq_Jobs`.TQId = %s" % tqId retVal = self._update( sqlCmd, conn = connObj ) if not retVal[ 'OK' ]: return S_ERROR( "Could not delete task queue %s: %s" % ( tqId, retVal[ 'Message' ] ) ) for mvField in self.__multiValueDefFields: retVal = self._update( "DELETE FROM `tq_TQTo%s` WHERE TQId = %s" % ( mvField, tqId ), conn = connObj ) if not retVal[ 'OK' ]: return retVal if delTQ > 0: self.recalculateTQSharesForEntity( tqOwnerDN, tqOwnerGroup, connObj = connObj ) return S_OK( True ) return S_OK( False ) def getMatchingTaskQueues( self, tqMatchDict, negativeCond = False ): """ rename to have the same method as exposed in the Matcher """ return self.retrieveTaskQueuesThatMatch( tqMatchDict, negativeCond = negativeCond ) def getNumTaskQueues( self ): """ Get the number of task queues in the system """ sqlCmd = "SELECT COUNT( TQId ) FROM `tq_TaskQueues`" retVal = self._query( sqlCmd ) if not retVal[ 'OK' ]: return retVal return S_OK( retVal[ 'Value' ][0][0] ) def retrieveTaskQueuesThatMatch( self, tqMatchDict, negativeCond = False ): """ Get the info of the task queues that match a resource """ result = self.matchAndGetTaskQueue( tqMatchDict, numQueuesToGet = 0, negativeCond = negativeCond ) if not result[ 'OK' ]: return result return self.retrieveTaskQueues( [ tqTuple[0] for tqTuple in
result[ 'Value' ] ] ) def retrieveTaskQueues( self, tqIdList = False ): """ Get all the task queues """ sqlSelectEntries = [ "`tq_TaskQueues`.TQId", "`tq_TaskQueues`.Priority", "COUNT( `tq_Jobs`.TQId )" ] sqlGroupEntries = [ "`tq_TaskQueues`.TQId", "`tq_TaskQueues`.Priority" ] for field in self.__singleValueDefFields: sqlSelectEntries.append( "`tq_TaskQueues`.%s" % field ) sqlGroupEntries.append( "`tq_TaskQueues`.%s" % field ) sqlCmd = "SELECT %s FROM `tq_TaskQueues`, `tq_Jobs`" % ", ".join( sqlSelectEntries ) sqlTQCond = "" if tqIdList != False: if len( tqIdList ) == 0: return S_OK( {} ) else: sqlTQCond += " AND `tq_TaskQueues`.TQId in ( %s )" % ", ".join( [ str( id ) for id in tqIdList ] ) sqlCmd = "%s WHERE `tq_TaskQueues`.TQId = `tq_Jobs`.TQId %s GROUP BY %s" % ( sqlCmd, sqlTQCond, ", ".join( sqlGroupEntries ) ) retVal = self._query( sqlCmd ) if not retVal[ 'OK' ]: return S_ERROR( "Can't retrieve task queues info: %s" % retVal[ 'Message' ] ) tqData = {} for record in retVal[ 'Value' ]: tqId = record[0] tqData[ tqId ] = { 'Priority' : record[1], 'Jobs' : record[2] } record = record[3:] for iP in range( len( self.__singleValueDefFields ) ): tqData[ tqId ][ self.__singleValueDefFields[ iP ] ] = record[ iP ] tqNeedCleaning = False for field in self.__multiValueDefFields: table = "`tq_TQTo%s`" % field sqlCmd = "SELECT %s.TQId, %s.Value FROM %s" % ( table, table, table ) retVal = self._query( sqlCmd ) if not retVal[ 'OK' ]: return S_ERROR( "Can't retrieve task queues field % info: %s" % ( field, retVal[ 'Message' ] ) ) for record in retVal[ 'Value' ]: tqId = record[0] value = record[1] if not tqId in tqData: if tqIdList == False or tqId in tqIdList: self.log.warn( "Task Queue %s is defined in field %s but does not exist, triggering a cleaning" % ( tqId, field ) ) tqNeedCleaning = True else: if field not in tqData[ tqId ]: tqData[ tqId ][ field ] = [] tqData[ tqId ][ field ].append( value ) if tqNeedCleaning: self.cleanOrphanedTaskQueues() return S_OK( tqData ) def __updateGlobalShares( self ): """ Update internal structure for shares """ #Update group shares self.__groupShares = self.getGroupShares() #Apply corrections if enabled if self.isSharesCorrectionEnabled(): result = self.getGroupsInTQs() if not result[ 'OK' ]: self.log.error( "Could not get groups in the TQs", result[ 'Message' ] ) activeGroups = result[ 'Value' ] newShares = {} for group in activeGroups: if group in self.__groupShares: newShares[ group ] = self.__groupShares[ group ] newShares = self.__sharesCorrector.correctShares( newShares ) for group in self.__groupShares: if group in newShares: self.__groupShares[ group ] = newShares[ group ] def recalculateTQSharesForAll( self ): """ Recalculate all priorities for TQ's """ if self.isSharesCorrectionEnabled(): self.log.info( "Updating correctors state" ) self.__sharesCorrector.update() self.__updateGlobalShares() self.log.info( "Recalculating shares for all TQs" ) retVal = self._getConnection() if not retVal[ 'OK' ]: return S_ERROR( "Can't insert job: %s" % retVal[ 'Message' ] ) connObj = retVal[ 'Value' ] result = self._query( "SELECT DISTINCT( OwnerGroup ) FROM `tq_TaskQueues`" ) if not result[ 'OK' ]: return result for group in [ r[0] for r in result[ 'Value' ] ]: self.recalculateTQSharesForEntity( "all", group ) return S_OK() def recalculateTQSharesForEntity( self, userDN, userGroup, connObj = False ): """ Recalculate the shares for a userDN/userGroup combo """ self.log.info( "Recalculating shares for %s@%s TQs" % ( userDN, userGroup ) ) if userGroup in self.__groupShares: 
share = self.__groupShares[ userGroup ] else: share = float( DEFAULT_GROUP_SHARE ) if Properties.JOB_SHARING in CS.getPropertiesForGroup( userGroup ): #If group has JobSharing just set prio for that entry, userDN is irrelevant return self.__setPrioritiesForEntity( userDN, userGroup, share, connObj = connObj ) selSQL = "SELECT OwnerDN, COUNT(OwnerDN) FROM `tq_TaskQueues` WHERE OwnerGroup='%s' GROUP BY OwnerDN" % ( userGroup ) result = self._query( selSQL, conn = connObj ) if not result[ 'OK' ]: return result #Get owners in this group and the amount of times they appear data = [ ( r[0], r[1] ) for r in result[ 'Value' ] if r ] numOwners = len( data ) #If there are no owners do now if numOwners == 0: return S_OK() #Split the share amongst the number of owners share /= numOwners entitiesShares = dict( [ ( row[0], share ) for row in data ] ) #If corrector is enabled let it work it's magic if self.isSharesCorrectionEnabled(): entitiesShares = self.__sharesCorrector.correctShares( entitiesShares, group = userGroup ) #Keep updating owners = dict( data ) #IF the user is already known and has more than 1 tq, the rest of the users don't need to be modified #(The number of owners didn't change) if userDN in owners and owners[ userDN ] > 1: return self.__setPrioritiesForEntity( userDN, userGroup, entitiesShares[ userDN ], connObj = connObj ) #Oops the number of owners may have changed so we recalculate the prio for all owners in the group for userDN in owners: self.__setPrioritiesForEntity( userDN, userGroup, entitiesShares[ userDN ], connObj = connObj ) return S_OK() def __setPrioritiesForEntity( self, userDN, userGroup, share, connObj = False, consolidationFunc = "AVG" ): """ Set the priority for a userDN/userGroup combo given a splitted share """ self.log.info( "Setting priorities to %s@%s TQs" % ( userDN, userGroup ) ) tqCond = [ "t.OwnerGroup='%s'" % userGroup ] allowBgTQs = gConfig.getValue( "/Registry/Groups/%s/AllowBackgroundTQs" % userGroup, False ) if Properties.JOB_SHARING not in CS.getPropertiesForGroup( userGroup ): tqCond.append( "t.OwnerDN='%s'" % userDN ) tqCond.append( "t.TQId = j.TQId" ) if consolidationFunc == 'AVG': selectSQL = "SELECT j.TQId, SUM( j.RealPriority )/COUNT(j.RealPriority) FROM `tq_TaskQueues` t, `tq_Jobs` j WHERE " elif consolidationFunc == 'SUM': selectSQL = "SELECT j.TQId, SUM( j.RealPriority ) FROM `tq_TaskQueues` t, `tq_Jobs` j WHERE " else: return S_ERROR( "Unknown consolidation func %s for setting priorities" % consolidationFunc ) selectSQL += " AND ".join( tqCond ) selectSQL += " GROUP BY t.TQId" result = self._query( selectSQL, conn = connObj ) if not result[ 'OK' ]: return result tqDict = dict( result[ 'Value' ] ) if len( tqDict ) == 0: return S_OK() #Calculate Sum of priorities totalPrio = 0 for k in tqDict: if tqDict[k] > 0.1 or not allowBgTQs: totalPrio += tqDict[ k ] #Update prio for each TQ for tqId in tqDict: if tqDict[ tqId ] > 0.1 or not allowBgTQs: prio = ( share / totalPrio ) * tqDict[ tqId ] else: prio = TQ_MIN_SHARE prio = max( prio, TQ_MIN_SHARE ) tqDict[ tqId ] = prio #Generate groups of TQs that will have the same prio=sum(prios) maomenos result = self.retrieveTaskQueues( list( tqDict ) ) if not result[ 'OK' ]: return result allTQsData = result[ 'Value' ] tqGroups = {} for tqid in allTQsData: tqData = allTQsData[ tqid ] for field in ( 'Jobs', 'Priority' ) + self.__priorityIgnoredFields: if field in tqData: tqData.pop( field ) tqHash = [] for f in sorted( tqData ): tqHash.append( "%s:%s" % ( f, tqData[ f ] ) ) tqHash = "|".join( tqHash ) if 
tqHash not in tqGroups: tqGroups[ tqHash ] = [] tqGroups[ tqHash ].append( tqid ) tqGroups = [ tqGroups[ td ] for td in tqGroups ] #Do the grouping for tqGroup in tqGroups: totalPrio = 0 if len( tqGroup ) < 2: continue for tqid in tqGroup: totalPrio += tqDict[ tqid ] for tqid in tqGroup: tqDict[ tqid ] = totalPrio #Group by priorities prioDict = {} for tqId in tqDict: prio = tqDict[ tqId ] if prio not in prioDict: prioDict[ prio ] = [] prioDict[ prio ].append( tqId ) #Execute updates for prio in prioDict: tqList = ", ".join( [ str( tqId ) for tqId in prioDict[ prio ] ] ) updateSQL = "UPDATE `tq_TaskQueues` SET Priority=%.4f WHERE TQId in ( %s )" % ( prio, tqList ) self._update( updateSQL, conn = connObj ) return S_OK() def getGroupShares( self ): """ Get all the shares as a DICT """ result = gConfig.getSections( "/Registry/Groups" ) if result[ 'OK' ]: groups = result[ 'Value' ] else: groups = [] shares = {} for group in groups: shares[ group ] = gConfig.getValue( "/Registry/Groups/%s/JobShare" % group, DEFAULT_GROUP_SHARE ) return shares def propagateTQSharesIfChanged( self ): """ If the shares have changed in the CS, recalculate priorities """ shares = self.getGroupShares() if shares == self.__groupShares: return S_OK() self.__groupShares = shares return self.recalculateTQSharesForAll() def modifyJobsPriorities( self, jobPrioDict ): """ Modify the priority for some jobs """ for jId in jobPrioDict: jobPrioDict[jId] = int( jobPrioDict[jId] ) maxJobsInQuery = 1000 jobsList = sorted( jobPrioDict ) prioDict = {} for jId in jobsList: prio = jobPrioDict[ jId ] if not prio in prioDict: prioDict[ prio ] = [] prioDict[ prio ].append( str( jId ) ) updated = 0 for prio in prioDict: jobsList = prioDict[ prio ] for i in range( maxJobsInQuery, 0, len( jobsList ) ): jobs = ",".join( jobsList[ i : i + maxJobsInQuery ] ) updateSQL = "UPDATE `tq_Jobs` SET `Priority`=%s, `RealPriority`=%f WHERE `JobId` in ( %s )" % ( prio, self.__hackJobPriority( prio ), jobs ) result = self._update( updateSQL ) if not result[ 'OK' ]: return result updated += result[ 'Value' ] if not updated: return S_OK() return self.recalculateTQSharesForAll()
avedaee/DIRAC
WorkloadManagementSystem/DB/TaskQueueDB.py
Python
gpl-3.0
53,814
[ "DIRAC" ]
5cb81e223bef4571046f3532893af28cc050cc5621850a5c2571313f66d21afb
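The share arithmetic in recalculateTQSharesForEntity and __setPrioritiesForEntity is easy to lose among the SQL calls above. The standalone sketch below reproduces just that calculation: divide the group share evenly over its owners, then spread each owner's share across that owner's task queues in proportion to their consolidated job priorities, flooring background queues at TQ_MIN_SHARE. split_share is a hypothetical helper written only for illustration (it is not part of the DIRAC API), and the TQ_MIN_SHARE value below is assumed; the real constant is defined elsewhere in TaskQueueDB.py.

# Minimal sketch (not DIRAC API) of the priority-splitting arithmetic used in
# __setPrioritiesForEntity: a group's share is divided by the number of owners,
# then distributed over that owner's task queues proportionally to the
# consolidated job priority of each queue, with a floor of TQ_MIN_SHARE.
TQ_MIN_SHARE = 0.001  # assumed value; the real constant lives elsewhere in the module

def split_share(group_share, num_owners, tq_priorities, allow_bg_tqs=False):
    """tq_priorities maps tqId -> consolidated job priority (e.g. the AVG over jobs)."""
    share = float(group_share) / max(num_owners, 1)
    # Background queues (priority <= 0.1) are excluded from the normalisation
    # when AllowBackgroundTQs is set, exactly as in the original loop.
    total = sum(p for p in tq_priorities.values() if p > 0.1 or not allow_bg_tqs)
    result = {}
    for tq_id, p in tq_priorities.items():
        if p > 0.1 or not allow_bg_tqs:
            prio = (share / total) * p
        else:
            prio = TQ_MIN_SHARE
        result[tq_id] = max(prio, TQ_MIN_SHARE)
    return result

# Example: one owner, two queues with consolidated priorities 1.0 and 3.0 and a
# group share of 8 -> priorities 2.0 and 6.0.
print(split_share(8, 1, {101: 1.0, 102: 3.0}))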
from aces.materials.POSCAR import structure as Material class structure(Material): def getPOSCAR(self): return self.getMinimized() return """POSCAR file written by OVITO 1 14.987425 0.0 -4.3867412 0.0 4.418356 0.0 0.0 0.0 11.0396528 Bi I 8 8 Cartesian 12.2848625 1.0 2.5330513 2.9865892 1.0 8.1658306 12.0008354 1.0 -0.5525713 2.7025626 1.0 5.0802088 4.7911501 3.209178 4.7264219 10.4803019 3.209178 5.9724598 4.5071235 3.209178 1.6407995 10.1962748 3.209178 2.886838 0.7677309 3.209178 8.1967821 14.2196942 3.209178 -0.5835228 2.2998202 1.0 1.231761 12.687604 1.0 6.3814974 8.2614432 1.0 6.0034113 6.7259817 1.0 1.6098471 9.7935334 3.209178 -0.9616098 5.193892 3.209178 8.5748673 """ def getMinimized(self): return """ACES POSCAR 1.00000000000000 15.8697168905535229 0.0000000000000000 -3.8548710394516776 0.0000000000000000 4.3850706242184785 0.0000000000000000 0.3835877430558720 0.0000000000000000 11.0512463481755834 Bi I 8 8 Direct 0.8158938622792704 0.2263285258139973 0.5496040258995569 0.1934869726674740 0.2263285258139973 0.8200218893829293 0.8065130006435248 0.2263285258139973 0.2669688521120722 0.1841061443937278 0.2263285258139973 0.5373868013184457 0.3158938689512739 0.7263285258139973 0.5496040285515511 0.6934869860124755 0.7263285258139973 0.8200218765689306 0.3065130406765257 0.7263285258139973 0.2669688861370719 0.6841061243767299 0.7263285258139973 0.5373867752474462 0.0545237556102023 0.7263285258139973 0.7692801652907422 0.9454762510617939 0.7263285258139973 0.3177105894612587 0.1681736888909342 0.2263285258139973 0.1861781852961834 0.8318262577310632 0.2263285258139973 0.9008124640698176 0.5545237422662027 0.2263285258139973 0.7692801418717368 0.4454762510617938 0.2263285258139973 0.3177105260532634 0.6681737355969334 0.7263285258139973 0.1861781857391855 0.3318262910920680 0.7263285258139973 0.9008124139188161 """
vanceeasleaf/aces
aces/materials/Bi8I8.py
Python
gpl-2.0
2,024
[ "OVITO" ]
d77f1732375047a2da6a0a24f8637cb6ee410cb0313ad6c725edaed52b2ba301
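getMinimized() stores the relaxed Bi8I8 cell in the POSCAR "Direct" convention: the three lattice vectors are the rows of the cell matrix and each atomic position is given in fractional coordinates. A minimal sketch of converting those fractional coordinates to Cartesian ones follows; direct_to_cartesian is an illustrative helper, not part of aces, and the numbers are copied from the string above.

import numpy as np

# In the POSCAR convention the rows of the cell matrix are the lattice
# vectors, so a fractional (Direct) coordinate maps to Cartesian space as
# r_cartesian = scale * (r_fractional @ cell).
def direct_to_cartesian(cell, scale, frac_coords):
    cell = np.asarray(cell, dtype=float) * scale
    return np.asarray(frac_coords, dtype=float) @ cell

cell = [[15.8697168905535229, 0.0, -3.8548710394516776],
        [0.0, 4.3850706242184785, 0.0],
        [0.3835877430558720, 0.0, 11.0512463481755834]]
first_bi = [0.8158938622792704, 0.2263285258139973, 0.5496040258995569]
print(direct_to_cartesian(cell, 1.0, first_bi))  # Cartesian position of the first Bi atom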
"""Module for preparing data from NCBI. Most low layer module for manipulating data.""" import os import pickle from collections import defaultdict from Bio import Entrez from Bio import SeqIO # TODO: move to init CACHE_DIR = "../../Diploma/cache" if not os.path.isdir(CACHE_DIR): CACHE_DIR = "cache/" if not os.path.isdir(CACHE_DIR): os.makedirs(CACHE_DIR) # ************ NCBI RECORD OPERATIONS ************ # def get_gids(term="Viruses[Organism] AND srcdb_refseq[PROP] AND complete_genome"): """ Get genome IDs for given search term. :param term: search term for NCBI query :return: list of genome IDs for given term """ # term = "Viruses[Organism] AND srcdb_refseq[PROP] AND complete_genome" handle = Entrez.esearch(db="nucleotide", term=term, retmax=100000) record = Entrez.read(handle) id_list = sorted(set(record["IdList"])) print((record["Count"], len(record["IdList"]), len(id_list))) return id_list def get_rec(rec_id): """ Get record for given genome id. :param rec_id: genome id :return: record """ try: rec = pickle.load(open(os.path.join(CACHE_DIR, "%s.pkl.gz" % rec_id), "rb")) except IOError: # , FileNotFoundError: print(("downloading sequence id:", rec_id)) handle = Entrez.efetch(db="nucleotide", rettype="gb", id=rec_id) rec = SeqIO.read(handle, "gb") handle.close() pickle.dump(rec, open(os.path.join(CACHE_DIR, "%s.pkl.gz" % rec_id), "wb"), -1) print(("genome size:", len(rec.seq), rec.seq[:20] + "...")) print(("Taxonomy:", rec.annotations['taxonomy'])) for a, t in list(rec.annotations.items()): print((" %s: %s" % (a, str(t)[:15]))) print() return rec def get_gene(rec): """ Get record and return gene sequence. :param rec: record :return: gene sequence """ sequence = "" for f in rec.features: if f.type == "gene": start = f.location.nofuzzy_start end = f.location.nofuzzy_end if f.location.strand == 1: sequence += rec.seq[start:end] else: # ?? sequence += rec.seq[start:end].complement() return str(sequence) def load_oid_seq_classification(ids): """ Build dictionary of sequences and taxonomies for every genome ID. :param ids: genome IDs :return: sequences and taxonomy annotations dictionaries for every genome ID """ seq = defaultdict(list) tax = {} for oid in ids: rec = get_rec(oid) seq[oid] = str(rec.seq) tax[oid] = ';'.join(rec.annotations["taxonomy"]) return seq, tax # ************ TAXONOMY OPERATIONS ************ # def rec_dd(): """ Create dictionary of dictionaries to 'simulate' tree. :return: dictionary of dictionaries """ return defaultdict(rec_dd) def update_taxonomy(taxonomy, tax_path, genome_id): """ Create dictionary with taxonomy name and IDs of sequences which belongs to specific taxonomy. :param taxonomy: current taxonomy :param tax_path: taxonomy path :param genome_id: genome_id :return: updated taxonomy """ if not tax_path: return taxonomy tax = tax_path[0].lower() if tax in taxonomy: # check if tax in taxonomy and update # temp_taxonomy[tax]["data"].append(seq_record.annotations["gi"]) taxonomy[tax]["data"].append(genome_id) # taxonomy[tax]["data"].append(get_gene(rec)) update_taxonomy(taxonomy[tax], tax_path[1:], genome_id) else: # temp_taxonomy[tax] = {"data": list({seq_record.annotations["gi"]})} taxonomy[tax] = {"data": list({genome_id})} # taxonomy[tax] = dict({"data": list({get_gene(rec)})}) temp = update_taxonomy(taxonomy[tax], tax_path[1:], genome_id) if len(temp) > 1: # 1 = data, 2 = data + key taxonomy = temp return taxonomy def filter_classification(rec, to_filter): """ Check if record is in filter list. 
:param rec: record :param to_filter: filter list :return: bool """ in_to_filter = False for temp_tax in rec.annotations["taxonomy"]: temp_tax = temp_tax.lower().split() for temp_tax_el in temp_tax: if temp_tax_el in to_filter: in_to_filter = True print("filtered ", rec.annotations["taxonomy"]) return in_to_filter def print_nice(taxonomy, level=0): """ Print taxonomy with tabs. :param taxonomy: taxonomy :param level: current level :return: """ for i in sorted(taxonomy.keys()): if i == "data": if len(taxonomy) == 1: return else: continue else: print(level * "\t", i.replace("->", "", 1), len(taxonomy[i]["data"])) print_nice(taxonomy[i], level + 1) def load_whole_taxonomy(): """ Build taxonomy and get list ids and labels. :return: data, label """ taxonomy = get_taxonomy(get_gids()) list_nodes = get_list_nodes_ids_labels(taxonomy) data, labels = list(zip(*list_nodes)) for label in labels: print(label) label_number = -1 temp_l = [] label_n = [] for l in labels: if l not in temp_l: temp_l.append(l) label_number += 1 label_n.append(label_number) return data, label_n def get_taxonomy(id_list, count=-1): # call: python get_viral_sequence.py>log.out 2>log.err # all virus sequences # term = "Viruses[Organism] NOT srcdb_refseq[PROP] NOT cellular organisms[ORGN] AND # nuccore genome samespecies[Filter] NOT nuccore genome[filter] NOT gbdiv syn[prop]" # only reference (refSEQ) virues sequences # see distinction between the two, here: # http://www.ncbi.nlm.nih.gov/genomes/GenomesHome.cgi?taxid=10239&hopt=faq """ Build taxonomy from Entrez search. :param id_list: list of genome ids we want to build taxonomy tree from :param count: how many elements we want in taxonomy; -1 means whole taxonomy :return: taxonomy """ taxonomy = rec_dd() temp_count = 1 for genome_id in id_list: try: rec = get_rec(genome_id) in_filter = filter_classification(rec, list({"bacteria", "unclassified", "unassigned"})) if not in_filter: update_taxonomy(taxonomy, rec.annotations["taxonomy"], genome_id) if count != -1: if temp_count == count: break temp_count += 1 except IOError as e: # efetch - Raises an IOError exception if there's a network error. # http://biopython.org/DIST/docs/api/Bio.Entrez-module.html print("IOError raised...") print(e) except ValueError as v: # http: // biopython.org / DIST / docs / api / Bio.SeqIO - module.html # read print("problems with handling SeqIO...") print(v) except pickle.PicklingError as p: # https://docs.python.org/2/library/pickle.html#pickle.PicklingError print("problems with pickling object...") print(p) return taxonomy def remove_small_nodes(taxonomy, threshold_size=100): """ Remove small nodes from dataset. :param taxonomy: input taxonomy :param threshold_size: how many nodes do parent need to keep it :return: output taxonomy """ if isinstance(taxonomy, (defaultdict, dict)): taxonomy_keys = [x for x in list(taxonomy.keys()) if x != "data"] for i in taxonomy_keys: print(i, len(taxonomy[i]['data'])) if len(taxonomy[i]['data']) < threshold_size: taxonomy.pop(i) else: remove_small_nodes(taxonomy[i]) else: return taxonomy # ************ LIST OPERATIONS ************ # def remove_lists(taxonomy): """ Remove all list nodes from taxonomy. 
:param taxonomy: taxonomy :return: taxonomy """ # check for recurse exit if isinstance(taxonomy, (defaultdict, dict)): for i in [x for x in list(taxonomy.keys()) if x != "data"]: if set(taxonomy[i]) == set(list({"data"})): # if parent has only one list node, remove it # if len([x for x in taxonomy.keys() if x != "data"]) == 1: taxonomy.pop(i) continue else: remove_lists(taxonomy[i]) else: return taxonomy def get_list_nodes_unique(taxonomy, parent=""): """ Get taxonomy and return unique list nodes. :param taxonomy: taxonomy :param parent: parent of current node :return: unique list nodes """ # checked by hand and it works as expected list_nodes = list() keys = [x for x in list(taxonomy.keys()) if x != "data"] for i in keys: if set(taxonomy[i]) == set(list({"data"})): list_nodes.append(i) else: list_nodes += get_list_nodes_unique(taxonomy[i], parent + "->" + i) return list_nodes def count_list_nodes(taxonomy): """ Count list nodes and return sum. :param taxonomy: taxonomy :return: int """ count = 0 keys = [x for x in list(taxonomy.keys()) if x != "data"] for i in keys: if set(taxonomy[i]) == set(list({"data"})): if i == keys[-1]: count += 1 return count else: count += 1 else: count += count_list_nodes(taxonomy[i]) return count def get_list_nodes_ids_labels(taxonomy): """ Get taxonomy and return tuples of all list nodes. :param taxonomy: taxonomy :return: list of tuples (id, class) """ if len(list(taxonomy.keys())) > 1 or list(taxonomy.keys()) == ["viruses"]: temp = [] for k in [x for x in list(taxonomy.keys()) if x != "data"]: temp += get_list_nodes_ids_labels(taxonomy[k]) return temp # else: # return [(x, parent) for x in taxonomy["data"]] # ************ ALL NODES OPERATIONS ************ # def count_examples(taxonomy): """ Get taxonomy, count examples in every node and return sum. :param taxonomy: taxonomy :return: sum of examples """ count = 0 keys = [x for x in list(taxonomy.keys()) if x != "data"] for i in keys: if set(taxonomy[i]) == set(list({"data"})): if i == keys[-1]: count += len(taxonomy[i]["data"]) return count else: count += len(taxonomy[i]["data"]) else: count += count_examples(taxonomy[i]) return count def get_all_nodes(taxonomy, parent=""): """ Get taxonomy and return all nodes (including list nodes). :param parent: parent of current node - default "" :param taxonomy: taxonomy :return: all nodes """ all_nodes = list() keys = [x for x in list(taxonomy.keys()) if x != "data"] for i in keys: # if we want all non-list nodes, than this stays, otherwise comment this # if len([x for x in taxonomy[i].keys() if x != "data"]) == 0: # continue if i == "rest": all_nodes.append(parent + "->" + i) else: all_nodes.append(i) all_nodes += get_all_nodes(taxonomy[i], i) return all_nodes # ************ OTHER ************ # def load_seqs_from_ncbi(seq_len=100, skip_read=0, overlap=50, taxonomy_el_count=-1): """ Load sequences from NCBI database. Prepare sequences, sliced to seq_len length. Skip every skip_read and overlap two reads with overlap nucleotides. Overlap 50 means that half of the read is going to be overlapped with next read. If seq_len is -1, load whole sequences (do not strip them) - usually using with fasta format as we slice sequences later. 
:param seq_len: read length :param skip_read: number of skipped reads :param overlap: overlapping nucleotides count :param taxonomy_el_count: how many elements we want in taxonomy; -1 means whole taxonomy :return: dictionary reads - each genome ID key contains list of reads for specific genome, dictionary taxonomy - each genome ID key contains taxonomy for specific genome """ data, _ = run(taxonomy_el_count) print("getting sequences...") seqs, tax = load_oid_seq_classification(data) reads = defaultdict(list) if seq_len != -1: for oid, seq in seqs.items(): while seq: if len(seq) < seq_len: # we don't want shorter sequences than seq_len (shorter than 100) break reads[oid].append(seq[:seq_len]) seq = seq[seq_len - overlap + ((seq_len - overlap) * skip_read):] else: reads = seqs return reads, tax def run(taxonomy_el_count=-1): """ Build taxonomy and get list ids and labels. :param taxonomy_el_count: how many elements we want in taxonomy; -1 means whole taxonomy :return: data, label """ taxonomy = get_taxonomy(get_gids(), count=taxonomy_el_count) # remove_lists(taxonomy) print_nice(taxonomy) remove_small_nodes(taxonomy, 100) print_nice(taxonomy) list_nodes = get_list_nodes_ids_labels(taxonomy) data, labels = list(zip(*list_nodes)) # for label in labels: # print label label_number = -1 temp_l = [] label_n = [] for l in labels: if l not in temp_l: temp_l.append(l) label_number += 1 label_n.append(label_number) return data, label_n if __name__ == "__main__": # a = load_seqs_from_ncbi(taxonomy_el_count=20) temp_taxonomy = get_taxonomy(get_gids()) print("no of examples after taxonomy was built: %d" % count_examples(temp_taxonomy)) print("no of list nodes after taxonomy was built: %d" % count_list_nodes(temp_taxonomy)) print_nice(temp_taxonomy) remove_small_nodes(temp_taxonomy, 100) # remove_lists(temp_taxonomy) print_nice(temp_taxonomy) run()
mkopar/Virus-classification-theano
VirClass/load_ncbi.py
Python
mit
14,243
[ "Biopython" ]
fb3bfd6e8431f720004f8b2284dc82540602aba7fc1d96d303e9b0b87350d186
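The read preparation in load_seqs_from_ncbi advances through each genome with a stride of seq_len - overlap plus (seq_len - overlap) * skip_read and drops trailing fragments shorter than seq_len. A standalone sketch of just that slicing step; slice_reads is an illustrative helper, not an import from VirClass.load_ncbi.

# Sketch of the slicing done in load_seqs_from_ncbi: a genome sequence is cut
# into reads of length seq_len, consecutive reads overlap by `overlap`
# nucleotides, and `skip_read` extra strides are skipped between kept reads.
def slice_reads(seq, seq_len=100, overlap=50, skip_read=0):
    reads = []
    stride = (seq_len - overlap) * (1 + skip_read)
    while len(seq) >= seq_len:          # drop trailing fragments shorter than seq_len
        reads.append(seq[:seq_len])
        seq = seq[stride:]
    return reads

# With seq_len=6 and overlap=3 the window advances by 3 characters each time.
print(slice_reads("ACGTACGTACGT", seq_len=6, overlap=3))
# ['ACGTAC', 'TACGTA', 'GTACGT']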
""" Create a class that contains variables related to the crystal of a DFT simulation. """ import numpy as np from operator import mod def makeR(r): """Create a matrix of coordinate vectors from a single coordinate vector. Args: r (list or numpy.array): a single coordinate vector. Returns: (numpy.array): an array of permutations of the input coordinate vector. The coordinate vectors are the columns of R. Example: >>> from dft.poisson import R_matrix >>> r = [1,1,0] >>> R = R_matrix(r) """ return np.array([np.roll(r, i) for i in range(len(r))]) def gauss_charge_dist(r, pos, sigma): """Evaluate a charge distribution created from a Gaussian distribution at a point r. Args: r (list, numpy.ndarray): an array that represents a point in 3D, Euclidean space. pos (numpy.ndarray): the position of the Gaussian distribution. sigma (float): the standard deviation of the Gaussian distribution Return: (float): the value of the charge distribution at the provided location. Example: >>> from dft.poisson import gaussian_charge_dist >>> val = Gaussian_charge_distribution([.3, .3, .3], [0,0,0]) """ if np.shape(np.shape(r)) == (1,): n = -np.exp(-np.linalg.norm(r-pos)**2/(2*sigma**2))/( 2*np.pi*sigma**2)**(3./2) return n elif np.shape(np.shape(r)) == (2,): n = [] for ri in r: n.append(-np.exp(-np.linalg.norm(ri-pos)**2/(2*sigma**2))/( 2*np.pi*sigma**2)**(3./2)) return np.array(n) else: raise ValueError("Please provide a coordinate or array of coordinates.") class Crystal: """The purpose of this class is to store quantities related to the crystal of a DFT simulation. The default values will be those taken from Tomas Arias's lectures. Args: R (numpy.ndarray): an array of the vectors that define the supercell. S (list or numpy.ndarray): an array of the number of divisions along each direction defined by the lattice vectors. LC (float): lattice constant X (list or numpy.ndarray): a list of atomic positions inside the supercell. Z (float): the nuclear charge. Attributes: R (numpy.ndarray): an array of the vectors that define the supercell. S (list or numpy.ndarray): an array of the number of divisions along each direction defined by the lattice vectors. LC (float): lattice constant X (list or numpy.ndarray): a list of atomic positions inside the supercell. G (numpy.ndarray): an array of the reciprocal lattice vectors. Z (float): the nuclear charge. M (numpy.ndarray): a matrix of integers that give the positions of the grid points. N (numpy.ndarray): an integer matrix that contributes to the wave vector calculation. r (numpy.ndarray): an array of grid points with dimensions (N,3) where N is the number of grid points. volume (float): the volume of the supercell """ def __init__(self, R, S, LC, X=None, Z=None): self.R = R self.S = S self.LC = LC self.volume = np.linalg.det(R) self._G = None self.G() self._M = None self.M() self._N = None self.N() self._r = None self.r() self._X = X self.X() self._Z = Z self.Z() def M(self): """Calculate a matrix of integers that give the positions of the sampling points. Return: matrix (numpy.array): an array of integers with dimensions (N, 3) where N is the number of sampling points numpy.prod(S). 
""" if self._M is None: ndims = len(self.S) npts = np.product(self.S) self._M = np.empty([npts, ndims], dtype="int") for i in range(npts): self._M[i] = np.array([mod(i,self.S[0]), mod(np.floor(i/self.S[0]), self.S[1]), mod(np.floor( i/(self.S[0]*self.S[1]) ), self.S[2])]) return self._M else: return self._M def N(self): """Construct an integer matrix of the result the product of wave-vectors and sampling points. The wave vectors are chosen so that periodicity of the basis expansion is preserved. Args: M (numpy.arrray): is a matrix of integers that give the positions of the sampling points. This matrix can be created using M_matrix. Return: n_matrix (numpy.array): an array of integers with dimensions (N, 3) where N is the number of sampling points. """ if self._N is None: self._N = np.empty(np.shape(self._M), dtype="int") for i in range(np.shape(self._M)[0]): for j in range(np.shape(self._M)[1]): self._N[i,j] = self._M[i,j] - self.S[j] \ if (self._M[i,j] > self.S[j]/2) else self._M[i,j] return self._N else: return self._N def r(self): """Create a matrix of sampling point coordinates. Return: (numpy.ndarray): An array of sampling points with dimensions (N, 3) where N is the number of sampling points. """ if self._r is None: self._r = np.dot(self._M, np.dot(self.R, np.linalg.inv(np.diag(self.S)))) return self._r else: return self._r def G(self): """Return a matrix of reciprocal grid vectors. """ ndims = len(self.S) self._G = np.empty(np.shape(self.R)) for i in range(ndims): self._G[:,i] = 2*np.pi*np.cross( self.R[:,mod(i+1, ndims)], self.R[:, mod(i+2, ndims)])/self.volume return self._G def lattice_constant(self): if self._lattice_constant is None: self._lattice_constant = 6. return self._lattice_constant else: return self._lattice_constant def X(self): if self._X is None: self._X = [[0.,0.,0.],[1.75,0.,0.]] return self._X else: return self._X def Z(self): if self._Z is None: self._Z = [1., -1.] return self._Z else: return self._Z
jerjorg/dft
pydft/crystal.py
Python
gpl-3.0
6,774
[ "CRYSTAL", "Gaussian" ]
32ad0b0bd88dff97cd5d1409227d7f572646e3280092e90c562c17cb3ae1dacc
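Crystal.G() builds the reciprocal lattice one column at a time from cross products of the other two real-space lattice vectors divided by the cell volume, which makes G and R satisfy G.T @ R = 2*pi*I. A small standalone check of that construction; reciprocal is an illustrative helper, not part of pydft.

import numpy as np

# Each column of G is 2*pi times the cross product of the other two
# real-space lattice vectors divided by the cell volume, so G.T @ R = 2*pi*I.
def reciprocal(R):
    volume = np.linalg.det(R)
    G = np.empty_like(R, dtype=float)
    for i in range(3):
        G[:, i] = 2 * np.pi * np.cross(R[:, (i + 1) % 3], R[:, (i + 2) % 3]) / volume
    return G

R = np.diag([6.0, 6.0, 6.0])                          # simple cubic cell, lattice constant 6
G = reciprocal(R)
print(np.allclose(G.T @ R, 2 * np.pi * np.eye(3)))    # True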
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
import os
from myspider.items import CSDNItem

class CSDNSpider(BaseSpider):
    name = "csdn"
    allowed_domains = ["blog.csdn.net"]
    start_urls = []
    file = open("E:\GitFolder\VerticleSearchEngine\crawl\myspider\user.json")
    while 1:
        line = file.readline()
        if not line:
            break;
        #pos_s = line.find(":") + 4
        #pos_e = line.find(', "url') - 2
        #username = line[pos_s:pos_e]
        #if not os.path.exists('E://CSDN/' + str(username)):
        #    os.mkdir('E://CSDN/' + str(username))
        pos_h = line.find("http://")
        blogurl = line[pos_h:-5]
        for i in range(1, 10):
            start_urls.append(blogurl+"/article/list/"+str(i))

    def parse(self, response):
        hxs = HtmlXPathSelector(response)
        items = []
        #info_item = CSDNItem()
        #blog_title = hxs.select('//div[@id="blog_title"]/text()').extract()
        #user = hxs.select('//div[@id="blog_userface"]/span/a/text()').extract()
        #visit = hxs.select('//ul[@id="blog_rank"]/li[1]/span/text()').extract()
        #score = hxs.select('//ul[@id="blog_rank"]/li[2]/span/text()').extract()
        #ranking = hxs.select('//ul[@id="blog_rank"]/li[3]/span/text()').extract()
        #info_item['blog_title'] = blog_title
        #info_item['user'] = user
        #info_item['visit'] = visit
        #info_item['score'] = score
        #info_item['ranking'] = ranking
        #items.append(info_item)

        #tags = hxs.select('//div[@id="panel_Category"]/ul[2]/li')
        #for tag in tags:
        #    tag_item = CSDNItem()
        #    tag_item['tag'] = tag.select('a/text()').extract()
        #    items.append(tag_item)

        articles = hxs.select('//span[@class="link_title"]')
        for article in articles:
            article_item = CSDNItem()
            #article_item['title'] = article.select('a/text()').extract()
            title = ''.join(article.select('a/text()').extract())
            url = ''.join(article.select('a/@href').extract())
            user = url[14:url.find("/article")]
            article_item['url'] = url
            article_item['user'] = hxs.select('//div[@id="blog_userface"]/span/a/text()').extract()
            #dirname = "E://CSDN/" + user + "/" + title
            #if not os.path.exists(dirname):
            #    os.mkdir(dirname)
            items.append(article_item)
        return items
SchoolProjs/Gitsoo
gitsoo_academic/myspider/myspider/spiders/csdn_spider.py
Python
apache-2.0
2,461
[ "VisIt" ]
207505e8d8527248dba0511e996edcd388210787bf96cfb44f28646abd2ee178
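In the class body the spider reads user.json line by line, slices out the blog URL that follows "http://", and fans each blog out into nine /article/list/<n> pages. Below is a sketch of that expansion in isolation; listing_urls is an illustrative helper, and the sample line is an assumed format (the real user.json is not shown here), chosen only so the spider's [pos_h:-5] slice is visible.

# Sketch of how one line of user.json becomes nine paginated listing URLs.
# The slice [pos_h:-5] mirrors the spider; the sample line below is a guessed
# format since user.json itself is not part of this record.
def listing_urls(line, pages=range(1, 10)):
    pos_h = line.find("http://")
    blogurl = line[pos_h:-5]
    return [blogurl + "/article/list/" + str(i) for i in pages]

sample = '{"username": "someone", "url": "http://blog.csdn.net/someone"}, \n'
print(listing_urls(sample)[:3])  # first three listing URLs for that blog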
from unittest import mock import numpy as np import pytest import hyperspy.api as hs from hyperspy.misc.utils import slugify from hyperspy.decorators import lazifyTestClass RTOL = 1E-6 class TestModelJacobians: def setup_method(self, method): s = hs.signals.Signal1D(np.zeros(1)) m = s.create_model() self.low_loss = 7. self.weights = 0.3 m.axis.axis = np.array([1, 0]) m.channel_switches = np.array([0, 1], dtype=bool) m.append(hs.model.components1D.Gaussian()) m[0].A.value = 1 m[0].centre.value = 2. m[0].sigma.twin = m[0].centre m._low_loss = mock.MagicMock() m.low_loss.return_value = self.low_loss self.model = m m.convolution_axis = np.zeros(2) def test_jacobian_not_convolved(self): m = self.model m.convolved = False jac = m._jacobian((1, 2, 3), None, weights=self.weights) np.testing.assert_array_almost_equal(jac.squeeze(), self.weights * np.array([m[0].A.grad(0), m[0].sigma.grad(0) + m[0].centre.grad(0)])) assert m[0].A.value == 1 assert m[0].centre.value == 2 assert m[0].sigma.value == 2 def test_jacobian_convolved(self): m = self.model m.convolved = True m.append(hs.model.components1D.Gaussian()) m[0].convolved = False m[1].convolved = True jac = m._jacobian((1, 2, 3, 4, 5), None, weights=self.weights) np.testing.assert_array_almost_equal(jac.squeeze(), self.weights * np.array([m[0].A.grad(0), m[0].sigma.grad(0) + m[0].centre.grad(0), m[1].A.grad(0) * self.low_loss, m[1].centre.grad(0) * self.low_loss, m[1].sigma.grad(0) * self.low_loss, ])) assert m[0].A.value == 1 assert m[0].centre.value == 2 assert m[0].sigma.value == 2 assert m[1].A.value == 3 assert m[1].centre.value == 4 assert m[1].sigma.value == 5 class TestModelCallMethod: def setup_method(self, method): s = hs.signals.Signal1D(np.empty(1)) m = s.create_model() m.append(hs.model.components1D.Gaussian()) m.append(hs.model.components1D.Gaussian()) self.model = m def test_call_method_no_convolutions(self): m = self.model m.convolved = False m[1].active = False r1 = m() r2 = m(onlyactive=True) np.testing.assert_allclose(m[0].function(0) * 2, r1) np.testing.assert_allclose(m[0].function(0), r2) m.convolved = True r1 = m(non_convolved=True) r2 = m(non_convolved=True, onlyactive=True) np.testing.assert_allclose(m[0].function(0) * 2, r1) np.testing.assert_allclose(m[0].function(0), r2) def test_call_method_with_convolutions(self): m = self.model m._low_loss = mock.MagicMock() m.low_loss.return_value = 0.3 m.convolved = True m.append(hs.model.components1D.Gaussian()) m[1].active = False m[0].convolved = True m[1].convolved = False m[2].convolved = False m.convolution_axis = np.array([0., ]) r1 = m() r2 = m(onlyactive=True) np.testing.assert_allclose(m[0].function(0) * 2.3, r1) np.testing.assert_allclose(m[0].function(0) * 1.3, r2) def test_call_method_binned(self): m = self.model m.convolved = False m.remove(1) m.signal.metadata.Signal.binned = True m.signal.axes_manager[-1].scale = 0.3 r1 = m() np.testing.assert_allclose(m[0].function(0) * 0.3, r1) class TestModelPlotCall: def setup_method(self, method): s = hs.signals.Signal1D(np.empty(1)) m = s.create_model() m.__call__ = mock.MagicMock() m.__call__.return_value = np.array([0.5, 0.25]) m.axis = mock.MagicMock() m.fetch_stored_values = mock.MagicMock() m.channel_switches = np.array([0, 1, 1, 0, 0], dtype=bool) self.model = m def test_model2plot_own_am(self): m = self.model m.axis.axis.shape = (5,) res = m._model2plot(m.axes_manager) np.testing.assert_array_equal( res, np.array([np.nan, 0.5, 0.25, np.nan, np.nan])) assert m.__call__.called assert ( m.__call__.call_args[1] == { 
'non_convolved': False, 'onlyactive': True}) assert not m.fetch_stored_values.called def test_model2plot_other_am(self): m = self.model res = m._model2plot(m.axes_manager.deepcopy(), out_of_range2nans=False) np.testing.assert_array_equal(res, np.array([0.5, 0.25])) assert m.__call__.called assert ( m.__call__.call_args[1] == { 'non_convolved': False, 'onlyactive': True}) assert 2 == m.fetch_stored_values.call_count class TestModelSettingPZero: def setup_method(self, method): s = hs.signals.Signal1D(np.empty(1)) m = s.create_model() m.append(hs.model.components1D.Gaussian()) m[0].A.value = 1.1 m[0].centre._number_of_elements = 2 m[0].centre.value = (2.2, 3.3) m[0].sigma.value = 4.4 m[0].sigma.free = False m[0].A._bounds = (0.1, 0.11) m[0].centre._bounds = ((0.2, 0.21), (0.3, 0.31)) m[0].sigma._bounds = (0.4, 0.41) self.model = m def test_setting_p0(self): m = self.model m.append(hs.model.components1D.Gaussian()) m[-1].active = False m.p0 = None m._set_p0() assert m.p0 == (1.1, 2.2, 3.3) def test_fetching_from_p0(self): m = self.model m.append(hs.model.components1D.Gaussian()) m[-1].active = False m[-1].A.value = 100 m[-1].sigma.value = 200 m[-1].centre.value = 300 m.p0 = (1.2, 2.3, 3.4, 5.6, 6.7, 7.8) m._fetch_values_from_p0() assert m[0].A.value == 1.2 assert m[0].centre.value == (2.3, 3.4) assert m[0].sigma.value == 4.4 assert m[1].A.value == 100 assert m[1].sigma.value == 200 assert m[1].centre.value == 300 def test_setting_boundaries(self): m = self.model m.append(hs.model.components1D.Gaussian()) m[-1].active = False m.set_boundaries() assert (m.free_parameters_boundaries == [(0.1, 0.11), (0.2, 0.21), (0.3, 0.31)]) def test_setting_mpfit_parameters_info(self): m = self.model m[0].A.bmax = None m[0].centre.bmin = None m[0].centre.bmax = 0.31 m.append(hs.model.components1D.Gaussian()) m[-1].active = False m.set_mpfit_parameters_info() assert (m.mpfit_parinfo == [{'limited': [True, False], 'limits': [0.1, 0]}, {'limited': [False, True], 'limits': [0, 0.31]}, {'limited': [False, True], 'limits': [0, 0.31]}, ]) class TestModel1D: def setup_method(self, method): s = hs.signals.Signal1D(np.empty(1)) m = s.create_model() self.model = m def test_errfunc(self): m = self.model m._model_function = mock.MagicMock() m._model_function.return_value = 3. np.testing.assert_equal(m._errfunc(None, 1., None), 2.) np.testing.assert_equal(m._errfunc(None, 1., 0.3), 0.6) def test_errfunc2(self): m = self.model m._model_function = mock.MagicMock() m._model_function.return_value = 3. * np.ones(2) np.testing.assert_equal(m._errfunc2(None, np.ones(2), None), 2 * 4.) np.testing.assert_equal(m._errfunc2(None, np.ones(2), 0.3), 2 * 0.36) def test_gradient_ls(self): m = self.model m._errfunc = mock.MagicMock() m._errfunc.return_value = 0.1 m._jacobian = mock.MagicMock() m._jacobian.return_value = np.ones((1, 2)) * 7. np.testing.assert_equal(m._gradient_ls(None, None), 2 * 0.1 * 7 * 2) def test_gradient_ml(self): m = self.model m._model_function = mock.MagicMock() m._model_function.return_value = 3. * np.ones(2) m._jacobian = mock.MagicMock() m._jacobian.return_value = np.ones((1, 2)) * 7. 
np.testing.assert_equal( m._gradient_ml(None, 1.2), -2 * 7 * (1.2 / 3 - 1)) def test_model_function(self): m = self.model m.append(hs.model.components1D.Gaussian()) m[0].A.value = 1.3 m[0].centre.value = 0.003 m[0].sigma.value = 0.1 param = (100, 0.1, 0.2) np.testing.assert_array_almost_equal(176.03266338, m._model_function(param)) assert m[0].A.value == 100 assert m[0].centre.value == 0.1 assert m[0].sigma.value == 0.2 def test_append_existing_component(self): g = hs.model.components1D.Gaussian() m = self.model m.append(g) with pytest.raises(ValueError): m.append(g) def test_append_component(self): g = hs.model.components1D.Gaussian() m = self.model m.append(g) assert g in m assert g.model is m assert g._axes_manager is m.axes_manager assert all([hasattr(p, 'map') for p in g.parameters]) def test_calculating_convolution_axis(self): m = self.model # setup m.axis.offset = 10 m.axis.size = 10 ll_axis = mock.MagicMock() ll_axis.size = 7 ll_axis.value2index.return_value = 3 m._low_loss = mock.MagicMock() m.low_loss.axes_manager.signal_axes = [ll_axis, ] # calculation m.set_convolution_axis() # tests np.testing.assert_array_equal(m.convolution_axis, np.arange(7, 23)) np.testing.assert_equal(ll_axis.value2index.call_args[0][0], 0) def test_access_component_by_name(self): m = self.model g1 = hs.model.components1D.Gaussian() g2 = hs.model.components1D.Gaussian() g2.name = "test" m.extend((g1, g2)) assert m["test"] is g2 def test_access_component_by_index(self): m = self.model g1 = hs.model.components1D.Gaussian() g2 = hs.model.components1D.Gaussian() g2.name = "test" m.extend((g1, g2)) assert m[1] is g2 def test_component_name_when_append(self): m = self.model gs = [ hs.model.components1D.Gaussian(), hs.model.components1D.Gaussian(), hs.model.components1D.Gaussian()] m.extend(gs) assert m['Gaussian'] is gs[0] assert m['Gaussian_0'] is gs[1] assert m['Gaussian_1'] is gs[2] def test_several_component_with_same_name(self): m = self.model gs = [ hs.model.components1D.Gaussian(), hs.model.components1D.Gaussian(), hs.model.components1D.Gaussian()] m.extend(gs) m[0]._name = "hs.model.components1D.Gaussian" m[1]._name = "hs.model.components1D.Gaussian" m[2]._name = "hs.model.components1D.Gaussian" with pytest.raises(ValueError): m['Gaussian'] def test_no_component_with_that_name(self): m = self.model with pytest.raises(ValueError): m['Voigt'] def test_component_already_in_model(self): m = self.model g1 = hs.model.components1D.Gaussian() with pytest.raises(ValueError): m.extend((g1, g1)) def test_remove_component(self): m = self.model g1 = hs.model.components1D.Gaussian() m.append(g1) m.remove(g1) assert len(m) == 0 def test_remove_component_by_index(self): m = self.model g1 = hs.model.components1D.Gaussian() m.append(g1) m.remove(0) assert len(m) == 0 def test_remove_component_by_name(self): m = self.model g1 = hs.model.components1D.Gaussian() m.append(g1) m.remove(g1.name) assert len(m) == 0 def test_delete_component_by_index(self): m = self.model g1 = hs.model.components1D.Gaussian() m.append(g1) del m[0] assert g1 not in m def test_delete_component_by_name(self): m = self.model g1 = hs.model.components1D.Gaussian() m.append(g1) del m[g1.name] assert g1 not in m def test_delete_slice(self): m = self.model g1 = hs.model.components1D.Gaussian() g2 = hs.model.components1D.Gaussian() g3 = hs.model.components1D.Gaussian() g3.A.twin = g1.A g1.sigma.twin = g2.sigma m.extend([g1, g2, g3]) del m[:2] assert g1 not in m assert g2 not in m assert g3 in m assert not g1.sigma.twin assert not g1.A._twins def 
test_get_component_by_name(self): m = self.model g1 = hs.model.components1D.Gaussian() g2 = hs.model.components1D.Gaussian() g2.name = "test" m.extend((g1, g2)) assert m._get_component("test") is g2 def test_get_component_by_index(self): m = self.model g1 = hs.model.components1D.Gaussian() g2 = hs.model.components1D.Gaussian() g2.name = "test" m.extend((g1, g2)) assert m._get_component(1) is g2 def test_get_component_by_component(self): m = self.model g1 = hs.model.components1D.Gaussian() g2 = hs.model.components1D.Gaussian() g2.name = "test" m.extend((g1, g2)) assert m._get_component(g2) is g2 def test_get_component_wrong(self): m = self.model g1 = hs.model.components1D.Gaussian() g2 = hs.model.components1D.Gaussian() g2.name = "test" m.extend((g1, g2)) with pytest.raises(ValueError): m._get_component(1.2) def test_components_class_default(self): m = self.model g1 = hs.model.components1D.Gaussian() m.append(g1) assert getattr(m.components, g1.name) is g1 def test_components_class_change_name(self): m = self.model g1 = hs.model.components1D.Gaussian() m.append(g1) g1.name = "test" assert getattr(m.components, g1.name) is g1 def test_components_class_change_name_del_default(self): m = self.model g1 = hs.model.components1D.Gaussian() m.append(g1) g1.name = "test" with pytest.raises(AttributeError): getattr(m.components, "Gaussian") def test_components_class_change_invalid_name(self): m = self.model g1 = hs.model.components1D.Gaussian() m.append(g1) g1.name = "1, Test This!" assert ( getattr(m.components, slugify(g1.name, valid_variable_name=True)) is g1) def test_components_class_change_name_del_default2(self): m = self.model g1 = hs.model.components1D.Gaussian() m.append(g1) invalid_name = "1, Test This!" g1.name = invalid_name g1.name = "test" with pytest.raises(AttributeError): getattr(m.components, slugify(invalid_name)) def test_snap_parameter_bounds(self): m = self.model g1 = hs.model.components1D.Gaussian() m.append(g1) g2 = hs.model.components1D.Gaussian() m.append(g2) g3 = hs.model.components1D.Gaussian() m.append(g3) g4 = hs.model.components1D.Gaussian() m.append(g4) p = hs.model.components1D.Polynomial(3) m.append(p) g1.A.value = 3. g1.centre.bmin = 300. g1.centre.value = 1. g1.sigma.bmax = 15. g1.sigma.value = 30 g2.A.value = 1 g2.A.bmin = 0. g2.A.bmax = 3. g2.centre.value = 0 g2.centre.bmin = 1 g2.centre.bmax = 3. g2.sigma.value = 4 g2.sigma.bmin = 1 g2.sigma.bmax = 3. g3.A.bmin = 0 g3.A.value = -3 g3.A.free = False g3.centre.value = 15 g3.centre.bmax = 10 g3.centre.free = False g3.sigma.value = 1 g3.sigma.bmin = 0 g3.sigma.bmax = 0 g4.active = False g4.A.value = 300 g4.A.bmin = 500 g4.centre.value = 0 g4.centre.bmax = -1 g4.sigma.value = 1 g4.sigma.bmin = 10 p.coefficients.value = (1, 2, 3, 4) p.coefficients.bmin = 2 p.coefficients.bmax = 3 m.ensure_parameters_in_bounds() np.testing.assert_allclose(g1.A.value, 3.) np.testing.assert_allclose(g2.A.value, 1.) np.testing.assert_allclose(g3.A.value, -3.) np.testing.assert_allclose(g4.A.value, 300.) np.testing.assert_allclose(g1.centre.value, 300.) np.testing.assert_allclose(g2.centre.value, 1.) np.testing.assert_allclose(g3.centre.value, 15.) np.testing.assert_allclose(g4.centre.value, 0) np.testing.assert_allclose(g1.sigma.value, 15.) np.testing.assert_allclose(g2.sigma.value, 3.) np.testing.assert_allclose(g3.sigma.value, 0.) 
np.testing.assert_allclose(g4.sigma.value, 1) np.testing.assert_allclose(p.coefficients.value, (2, 2, 3, 3)) class TestModel2D: def setup_method(self, method): g = hs.model.components2D.Gaussian2D( centre_x=-5., centre_y=-5., sigma_x=1., sigma_y=2.) x = np.arange(-10, 10, 0.01) y = np.arange(-10, 10, 0.01) X, Y = np.meshgrid(x, y) im = hs.signals.Signal2D(g.function(X, Y)) im.axes_manager[0].scale = 0.01 im.axes_manager[0].offset = -10 im.axes_manager[1].scale = 0.01 im.axes_manager[1].offset = -10 self.im = im def test_fitting(self): im = self.im m = im.create_model() gt = hs.model.components2D.Gaussian2D(centre_x=-4.5, centre_y=-4.5, sigma_x=0.5, sigma_y=1.5) m.append(gt) m.fit() np.testing.assert_allclose(gt.centre_x.value, -5.) np.testing.assert_allclose(gt.centre_y.value, -5.) np.testing.assert_allclose(gt.sigma_x.value, 1.) np.testing.assert_allclose(gt.sigma_y.value, 2.) @lazifyTestClass class TestModelFitBinned: def setup_method(self, method): np.random.seed(1) s = hs.signals.Signal1D( np.random.normal( scale=2, size=10000)).get_histogram() s.metadata.Signal.binned = True g = hs.model.components1D.Gaussian() m = s.create_model() m.append(g) g.sigma.value = 1 g.centre.value = 0.5 g.A.value = 1e3 self.m = m def test_fit_neldermead_leastsq(self): self.m.fit(fitter="Nelder-Mead", method="ls") np.testing.assert_allclose(self.m[0].A.value, 9976.14519369) np.testing.assert_allclose(self.m[0].centre.value, -0.110610743285) np.testing.assert_allclose(self.m[0].sigma.value, 1.98380705455) def test_fit_neldermead_ml(self): self.m.fit(fitter="Nelder-Mead", method="ml") np.testing.assert_allclose(self.m[0].A.value, 10001.39613936, atol=1E-3) np.testing.assert_allclose(self.m[0].centre.value, -0.104151206314, atol=1E-6) np.testing.assert_allclose(self.m[0].sigma.value, 2.00053642434) def test_fit_leastsq(self): self.m.fit(fitter="leastsq") np.testing.assert_allclose(self.m[0].A.value, 9976.14526082, RTOL) np.testing.assert_allclose( self.m[0].centre.value, -0.110610727064, RTOL) np.testing.assert_allclose(self.m[0].sigma.value, 1.98380707571, RTOL) def test_fit_mpfit(self): self.m.fit(fitter="mpfit") np.testing.assert_allclose(self.m[0].A.value, 9976.14526286) np.testing.assert_allclose(self.m[0].centre.value, -0.110610718444, atol=1E-6) np.testing.assert_allclose(self.m[0].sigma.value, 1.98380707614, atol=1E-6) def test_fit_odr(self): self.m.fit(fitter="odr") np.testing.assert_allclose(self.m[0].A.value, 9976.14531979) np.testing.assert_allclose(self.m[0].centre.value, -0.110610724054, atol=1e-7) np.testing.assert_allclose(self.m[0].sigma.value, 1.98380709939) def test_fit_leastsq_grad(self): self.m.fit(fitter="leastsq", grad=True) np.testing.assert_allclose(self.m[0].A.value, 9976.14526084) np.testing.assert_allclose(self.m[0].centre.value, -0.11061073306) np.testing.assert_allclose(self.m[0].sigma.value, 1.98380707552) def test_fit_mpfit_grad(self): self.m.fit(fitter="mpfit", grad=True) np.testing.assert_allclose(self.m[0].A.value, 9976.14526084) np.testing.assert_allclose(self.m[0].centre.value, -0.11061073306) np.testing.assert_allclose(self.m[0].sigma.value, 1.98380707552) def test_fit_odr_grad(self): self.m.fit(fitter="odr", grad=True) np.testing.assert_allclose(self.m[0].A.value, 9976.14531979) np.testing.assert_allclose(self.m[0].centre.value, -0.110610724054, atol=1e-7) np.testing.assert_allclose(self.m[0].sigma.value, 1.98380709939) def test_fit_bounded_mpfit(self): self.m[0].centre.bmin = 0.5 # self.m[0].bounded = True self.m.fit(fitter="mpfit", bounded=True) 
np.testing.assert_allclose(self.m[0].A.value, 9991.65422046) np.testing.assert_allclose(self.m[0].centre.value, 0.5) np.testing.assert_allclose(self.m[0].sigma.value, 2.08398236966) def test_fit_bounded_leastsq(self): pytest.importorskip("scipy", minversion="0.17") self.m[0].centre.bmin = 0.5 # self.m[0].bounded = True self.m.fit(fitter="leastsq", bounded=True) np.testing.assert_allclose(self.m[0].A.value, 9991.65422046) np.testing.assert_allclose(self.m[0].centre.value, 0.5) np.testing.assert_allclose(self.m[0].sigma.value, 2.08398236966, RTOL) def test_fit_bounded_lbfgs(self): self.m[0].centre.bmin = 0.5 # self.m[0].bounded = True self.m.fit(fitter="L-BFGS-B", bounded=True, grad=True) np.testing.assert_allclose(self.m[0].A.value, 9991.65422046) np.testing.assert_allclose(self.m[0].centre.value, 0.5) np.testing.assert_allclose(self.m[0].sigma.value, 2.08398236966) def test_fit_bounded_bad_starting_values_mpfit(self): self.m[0].centre.bmin = 0.5 self.m[0].centre.value = -1 # self.m[0].bounded = True self.m.fit(fitter="mpfit", bounded=True) np.testing.assert_allclose(self.m[0].A.value, 9991.65422046) np.testing.assert_allclose(self.m[0].centre.value, 0.5) np.testing.assert_allclose(self.m[0].sigma.value, 2.08398236966) def test_fit_bounded_bad_starting_values_leastsq(self): self.m[0].centre.bmin = 0.5 self.m[0].centre.value = -1 # self.m[0].bounded = True self.m.fit(fitter="leastsq", bounded=True) np.testing.assert_allclose(self.m[0].A.value, 9991.65422046) np.testing.assert_allclose(self.m[0].centre.value, 0.5) np.testing.assert_allclose(self.m[0].sigma.value, 2.08398236966, RTOL) def test_fit_bounded_bad_starting_values_lbfgs(self): self.m[0].centre.bmin = 0.5 self.m[0].centre.value = -1 # self.m[0].bounded = True self.m.fit(fitter="L-BFGS-B", bounded=True, grad=True) np.testing.assert_allclose(self.m[0].A.value, 9991.65422046) np.testing.assert_allclose(self.m[0].centre.value, 0.5) np.testing.assert_allclose(self.m[0].sigma.value, 2.08398236966) def test_wrong_method(self): with pytest.raises(ValueError): self.m.fit(method="dummy") @lazifyTestClass class TestModelWeighted: def setup_method(self, method): np.random.seed(1) s = hs.signals.Signal1D(np.arange(10, 100, 0.1)) s.metadata.set_item("Signal.Noise_properties.variance", hs.signals.Signal1D(np.arange(10, 100, 0.01))) s.axes_manager[0].scale = 0.1 s.axes_manager[0].offset = 10 s.add_poissonian_noise() m = s.create_model() m.append(hs.model.components1D.Polynomial(1)) self.m = m def test_fit_leastsq_binned(self): self.m.signal.metadata.Signal.binned = True self.m.fit(fitter="leastsq", method="ls") for result, expected in zip(self.m[0].coefficients.value, (9.9165596693502778, 1.6628238107916631)): np.testing.assert_allclose(result, expected, atol=1E-5) def test_fit_odr_binned(self): self.m.signal.metadata.Signal.binned = True self.m.fit(fitter="odr", method="ls") for result, expected in zip(self.m[0].coefficients.value, (9.9165596548961972, 1.6628247412317521)): np.testing.assert_allclose(result, expected, atol=1E-5) def test_fit_mpfit_binned(self): self.m.signal.metadata.Signal.binned = True self.m.fit(fitter="mpfit", method="ls") for result, expected in zip(self.m[0].coefficients.value, (9.9165596607108739, 1.6628243846485873)): np.testing.assert_allclose(result, expected, atol=1E-5) def test_fit_neldermead_binned(self): self.m.signal.metadata.Signal.binned = True self.m.fit( fitter="Nelder-Mead", method="ls", ) for result, expected in zip(self.m[0].coefficients.value, (9.9137288425667442, 1.8446013472266145)): 
np.testing.assert_allclose(result, expected, atol=1E-5) def test_fit_leastsq_unbinned(self): self.m.signal.metadata.Signal.binned = False self.m.fit(fitter="leastsq", method="ls") for result, expected in zip( self.m[0].coefficients.value, (0.99165596391487121, 0.16628254242532492)): np.testing.assert_allclose(result, expected, atol=1E-5) def test_fit_odr_unbinned(self): self.m.signal.metadata.Signal.binned = False self.m.fit(fitter="odr", method="ls") for result, expected in zip( self.m[0].coefficients.value, (0.99165596548961943, 0.16628247412317315)): np.testing.assert_allclose(result, expected, atol=1E-5) def test_fit_mpfit_unbinned(self): self.m.signal.metadata.Signal.binned = False self.m.fit(fitter="mpfit", method="ls") for result, expected in zip( self.m[0].coefficients.value, (0.99165596295068958, 0.16628257462820528)): np.testing.assert_allclose(result, expected, atol=1E-5) def test_fit_neldermead_unbinned(self): self.m.signal.metadata.Signal.binned = False self.m.fit( fitter="Nelder-Mead", method="ls", ) for result, expected in zip( self.m[0].coefficients.value, (0.99136169230026261, 0.18483060534056939)): np.testing.assert_allclose(result, expected, atol=1E-5) def test_chisq(self): self.m.signal.metadata.Signal.binned = True self.m.fit(fitter="leastsq", method="ls") np.testing.assert_allclose(self.m.chisq.data, 3029.16949561) def test_red_chisq(self): self.m.fit(fitter="leastsq", method="ls") np.testing.assert_allclose(self.m.red_chisq.data, 3.37700055) class TestModelScalarVariance: def setup_method(self, method): s = hs.signals.Signal1D(np.ones(100)) m = s.create_model() m.append(hs.model.components1D.Offset()) self.s = s self.m = m def test_std1_chisq(self): std = 1 np.random.seed(1) self.s.add_gaussian_noise(std) self.s.metadata.set_item("Signal.Noise_properties.variance", std ** 2) self.m.fit(fitter="leastsq", method="ls") np.testing.assert_allclose(self.m.chisq.data, 78.35015229) def test_std10_chisq(self): std = 10 np.random.seed(1) self.s.add_gaussian_noise(std) self.s.metadata.set_item("Signal.Noise_properties.variance", std ** 2) self.m.fit(fitter="leastsq", method="ls") np.testing.assert_allclose(self.m.chisq.data, 78.35015229) def test_std1_red_chisq(self): std = 1 np.random.seed(1) self.s.add_gaussian_noise(std) self.s.metadata.set_item("Signal.Noise_properties.variance", std ** 2) self.m.fit(fitter="leastsq", method="ls") np.testing.assert_allclose(self.m.red_chisq.data, 0.79949135) def test_std10_red_chisq(self): std = 10 np.random.seed(1) self.s.add_gaussian_noise(std) self.s.metadata.set_item("Signal.Noise_properties.variance", std ** 2) self.m.fit(fitter="leastsq", method="ls") np.testing.assert_allclose(self.m.red_chisq.data, 0.79949135) def test_std1_red_chisq_in_range(self): std = 1 self.m.set_signal_range(10, 50) np.random.seed(1) self.s.add_gaussian_noise(std) self.s.metadata.set_item("Signal.Noise_properties.variance", std ** 2) self.m.fit(fitter="leastsq", method="ls") np.testing.assert_allclose(self.m.red_chisq.data, 0.86206965) @lazifyTestClass class TestModelSignalVariance: def setup_method(self, method): variance = hs.signals.Signal1D( np.arange(100, 300, dtype="float64").reshape((2, 100))) s = variance.deepcopy() np.random.seed(1) std = 10 np.random.seed(1) s.add_gaussian_noise(std) np.random.seed(1) s.add_poissonian_noise() s.metadata.set_item("Signal.Noise_properties.variance", variance + std ** 2) m = s.create_model() m.append(hs.model.components1D.Polynomial(order=1)) self.s = s self.m = m def test_std1_red_chisq(self): 
self.m.multifit(fitter="leastsq", method="ls", show_progressbar=None) np.testing.assert_allclose(self.m.red_chisq.data[0], 0.813109, atol=1e-5) np.testing.assert_allclose(self.m.red_chisq.data[1], 0.697727, atol=1e-5) @lazifyTestClass class TestMultifit: def setup_method(self, method): s = hs.signals.Signal1D(np.zeros((2, 200))) s.axes_manager[-1].offset = 1 s.data[:] = 2 * s.axes_manager[-1].axis ** (-3) m = s.create_model() m.append(hs.model.components1D.PowerLaw()) m[0].A.value = 2 m[0].r.value = 2 m.store_current_values() m.axes_manager.indices = (1,) m[0].r.value = 100 m[0].A.value = 2 m.store_current_values() m[0].A.free = False self.m = m m.axes_manager.indices = (0,) m[0].A.value = 100 def test_fetch_only_fixed_false(self): self.m.multifit(fetch_only_fixed=False, show_progressbar=None) np.testing.assert_array_almost_equal(self.m[0].r.map['values'], [3., 100.]) np.testing.assert_array_almost_equal(self.m[0].A.map['values'], [2., 2.]) def test_fetch_only_fixed_true(self): self.m.multifit(fetch_only_fixed=True, show_progressbar=None) np.testing.assert_array_almost_equal(self.m[0].r.map['values'], [3., 3.]) np.testing.assert_array_almost_equal(self.m[0].A.map['values'], [2., 2.]) def test_parameter_as_signal_values(self): # There are more as_signal tests in test_parameters.py rs = self.m[0].r.as_signal(field="values") np.testing.assert_allclose(rs.data, np.array([2., 100.])) assert not "Signal.Noise_properties.variance" in rs.metadata self.m.multifit(fetch_only_fixed=True, show_progressbar=None) rs = self.m[0].r.as_signal(field="values") assert "Signal.Noise_properties.variance" in rs.metadata assert isinstance(rs.metadata.Signal.Noise_properties.variance, hs.signals.Signal1D) def test_bounded_snapping_mpfit(self): m = self.m m[0].A.free = True m.signal.data *= 2. m[0].A.value = 2. m[0].A.bmin = 3. m.multifit(fitter='mpfit', bounded=True, show_progressbar=None) np.testing.assert_array_almost_equal(self.m[0].r.map['values'], [3., 3.]) np.testing.assert_array_almost_equal(self.m[0].A.map['values'], [4., 4.]) def test_bounded_snapping_leastsq(self): m = self.m m[0].A.free = True m.signal.data *= 2. m[0].A.value = 2. m[0].A.bmin = 3. 
m.multifit(fitter='leastsq', bounded=True, show_progressbar=None) np.testing.assert_array_almost_equal(self.m[0].r.map['values'], [3., 3.]) np.testing.assert_array_almost_equal(self.m[0].A.map['values'], [4., 4.]) class TestStoreCurrentValues: def setup_method(self, method): self.m = hs.signals.Signal1D(np.arange(10)).create_model() self.o = hs.model.components1D.Offset() self.m.append(self.o) def test_active(self): self.o.offset.value = 2 self.o.offset.std = 3 self.m.store_current_values() assert self.o.offset.map["values"][0] == 2 assert self.o.offset.map["is_set"][0] == True def test_not_active(self): self.o.active = False self.o.offset.value = 2 self.o.offset.std = 3 self.m.store_current_values() assert self.o.offset.map["values"][0] != 2 class TestSetCurrentValuesTo: def setup_method(self, method): self.m = hs.signals.Signal1D( np.arange(10).reshape(2, 5)).create_model() self.comps = [ hs.model.components1D.Offset(), hs.model.components1D.Offset()] self.m.extend(self.comps) def test_set_all(self): for c in self.comps: c.offset.value = 2 self.m.assign_current_values_to_all() assert (self.comps[0].offset.map["values"] == 2).all() assert (self.comps[1].offset.map["values"] == 2).all() def test_set_1(self): self.comps[1].offset.value = 2 self.m.assign_current_values_to_all([self.comps[1]]) assert (self.comps[0].offset.map["values"] != 2).all() assert (self.comps[1].offset.map["values"] == 2).all() def test_fetch_values_from_arrays(): m = hs.signals.Signal1D(np.arange(10)).create_model() gaus = hs.model.components1D.Gaussian(A=100, sigma=10, centre=3) m.append(gaus) values = np.array([1.2, 3.4, 5.6]) stds = values - 1 m.fetch_values_from_array(values, array_std=stds) parameters = sorted(gaus.free_parameters, key=lambda x: x.name) for v, s, p in zip(values, stds, parameters): assert p.value == v assert p.std == s def sets_second_parameter_to_two(model, parameters, data, weights=None): return np.abs(parameters[1] - 2) class TestCustomOptimisation: def setup_method(self, method): s = hs.signals.Signal1D([1., 2, 3, 5, 7, 12, 8, 6, 3, 2, 2]) # data that should fit with A=49, centre=5.13, sigma=2.0 self.m = s.create_model() self.m.append(hs.model.components1D.Gaussian()) def test_custom_function(self): m = self.m m.fit(method='custom', min_function=sets_second_parameter_to_two, fitter='TNC') assert m[0].centre.value == 2. 
def test_no_function(self): with pytest.raises(ValueError): self.m.fit(method='custom') def test_no_gradient(self): with pytest.raises(ValueError): self.m.fit(method='custom', min_function=lambda *args: 1, grad=True ) def test_custom_gradient_function(self): from unittest import mock gradf = mock.Mock(return_value=[10, 1, 10]) self.m.fit(method='custom', fitter='BFGS', min_function=sets_second_parameter_to_two, grad=True, min_function_grad=gradf) assert gradf.called assert all([args[0] is self.m for args, kwargs in gradf.call_args_list]) class TestAsSignal: def setup_method(self, method): self.m = hs.signals.Signal1D( np.arange(20).reshape(2, 2, 5)).create_model() self.comps = [ hs.model.components1D.Offset(), hs.model.components1D.Offset()] self.m.extend(self.comps) for c in self.comps: c.offset.value = 2 self.m.assign_current_values_to_all() @pytest.mark.parallel def test_threaded_identical(self): # all components s = self.m.as_signal(show_progressbar=False, parallel=True) s1 = self.m.as_signal(show_progressbar=False, parallel=False) np.testing.assert_allclose(s1.data, s.data) # more complicated self.m[0].active_is_multidimensional = True self.m[0]._active_array[0] = False for component in [0, 1]: s = self.m.as_signal(component_list=[component], show_progressbar=False, parallel=True) s1 = self.m.as_signal(component_list=[component], show_progressbar=False, parallel=False) np.testing.assert_allclose(s1.data, s.data) @pytest.mark.parametrize('parallel', [pytest.param(True, marks=pytest.mark.parallel), False]) def test_all_components_simple(self, parallel): s = self.m.as_signal(show_progressbar=False, parallel=parallel) assert np.all(s.data == 4.) @pytest.mark.parametrize('parallel', [pytest.param(True, marks=pytest.mark.parallel), False]) def test_one_component_simple(self, parallel): s = self.m.as_signal(component_list=[0], show_progressbar=False, parallel=parallel) assert np.all(s.data == 2.) assert self.m[1].active @pytest.mark.parametrize('parallel', [pytest.param(True, marks=pytest.mark.parallel), False]) def test_all_components_multidim(self, parallel): self.m[0].active_is_multidimensional = True s = self.m.as_signal(show_progressbar=False, parallel=parallel) assert np.all(s.data == 4.) self.m[0]._active_array[0] = False s = self.m.as_signal(show_progressbar=False, parallel=parallel) np.testing.assert_array_equal( s.data, np.array([np.ones((2, 5)) * 2, np.ones((2, 5)) * 4])) assert self.m[0].active_is_multidimensional @pytest.mark.parametrize('parallel', [pytest.param(True, marks=pytest.mark.parallel), False]) def test_one_component_multidim(self, parallel): self.m[0].active_is_multidimensional = True s = self.m.as_signal(component_list=[0], show_progressbar=False, parallel=parallel) assert np.all(s.data == 2.) assert self.m[1].active assert not self.m[1].active_is_multidimensional s = self.m.as_signal(component_list=[1], show_progressbar=False, parallel=parallel) np.testing.assert_equal(s.data, 2.) assert self.m[0].active_is_multidimensional self.m[0]._active_array[0] = False s = self.m.as_signal(component_list=[1], show_progressbar=False, parallel=parallel) assert np.all(s.data == 2.) 
s = self.m.as_signal(component_list=[0], show_progressbar=False, parallel=parallel) np.testing.assert_array_equal(s.data, np.array([np.zeros((2, 5)), np.ones((2, 5)) * 2])) @lazifyTestClass class TestCreateModel: def setup_method(self, method): self.s = hs.signals.Signal1D(np.asarray([0, ])) self.im = hs.signals.Signal2D(np.ones([1, 1, ])) def test_create_model(self): from hyperspy.models.model1d import Model1D from hyperspy.models.model2d import Model2D assert isinstance(self.s.create_model(), Model1D) assert isinstance(self.im.create_model(), Model2D) class TestAdjustPosition: def setup_method(self, method): self.s = hs.signals.Signal1D(np.random.rand(10, 10, 20)) self.m = self.s.create_model() def test_enable_adjust_position(self, mpl_cleanup): self.m.append(hs.model.components1D.Gaussian()) self.m.enable_adjust_position() assert len(self.m._position_widgets) == 1 # Check that both line and label was added assert len(list(self.m._position_widgets.values())[0]) == 2 def test_disable_adjust_position(self, mpl_cleanup): self.m.append(hs.model.components1D.Gaussian()) self.m.enable_adjust_position() self.m.disable_adjust_position() assert len(self.m._position_widgets) == 0 def test_enable_all(self, mpl_cleanup): self.m.append(hs.model.components1D.Gaussian()) self.m.enable_adjust_position() self.m.append(hs.model.components1D.Gaussian()) assert len(self.m._position_widgets) == 2 def test_enable_all_zero_start(self, mpl_cleanup): self.m.enable_adjust_position() self.m.append(hs.model.components1D.Gaussian()) assert len(self.m._position_widgets) == 1 def test_manual_close(self, mpl_cleanup): self.m.append(hs.model.components1D.Gaussian()) self.m.append(hs.model.components1D.Gaussian()) self.m.enable_adjust_position() list(self.m._position_widgets.values())[0][0].close() assert len(self.m._position_widgets) == 2 assert len(list(self.m._position_widgets.values())[0]) == 1 list(self.m._position_widgets.values())[0][0].close() assert len(self.m._position_widgets) == 1 assert len(list(self.m._position_widgets.values())[0]) == 2 self.m.disable_adjust_position() assert len(self.m._position_widgets) == 0
magnunor/hyperspy
hyperspy/tests/model/test_model.py
Python
gpl-3.0
43748
[ "Gaussian" ]
256636670fd1dc3b3d27f636a53cce22d64b0477ee56e078fbabb09f7f0c4640
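The test file above exercises HyperSpy's 1D model-fitting API: create_model, component parameter maps, multifit, assign_current_values_to_all and as_signal. Purely as an illustration of that same workflow outside a test harness, the following is a minimal sketch; it assumes hyperspy is importable as hs with the 1.x API used in the tests, and the signal data and peak positions are invented for the example.

import numpy as np
import hyperspy.api as hs

# Two synthetic spectra of 32 channels, each containing one Gaussian peak.
x = np.arange(32)
data = np.stack([50 * np.exp(-(x - 10) ** 2 / 8.0),
                 80 * np.exp(-(x - 20) ** 2 / 8.0)])
s = hs.signals.Signal1D(data)

# Build a model, append a Gaussian component and fit every navigation index.
m = s.create_model()
g = hs.model.components1D.Gaussian()
m.append(g)
m.multifit(show_progressbar=False)

# Per-index fit results live in the parameter maps, which is what the
# assertions in the tests above read back.
print(g.centre.map["values"])
print(g.A.map["values"])

# Evaluate the fitted model back into a signal, as TestAsSignal does.
model_signal = m.as_signal(show_progressbar=False)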
import argparse import logging import pysam import re def interval_union(a, b): """Returns a 2-tuple representing the union of two intervals. Args: a: 2-tuple containing integer coordinates representing a half-open interval. b: 2-tuple containing integer coordinates representing the other half-open interval. Returns: 2-tuple containing integer coordinates representing the union(a, b) as a half-open interval. """ return min(a[0], b[0]), max(a[1], b[1]) def do_intervals_intersect(a, b): """Returns true if the given 2-tuples overlap. Args: a: 2-tuple containing integer coordinates representing a half-open interval. b: 2-tuple containing integer coordinates representing the other half-open interval. Returns: True if a and b overlap. """ return a[0] < b[1] and a[1] > b[0] def read_name_hash(read_name): """Takes a read name from an input bam and shortens & obfuscates it""" return str(abs(hash(read_name)) % 10**9) # 9-digit read name def postprocess_bam(input_bam_path, output_bam_path, chrom, pos, ref, alt): """Copies the input_bam to the output_bam while discarding extraneous or sensitive information. Leaves only the minimum required header, obfuscates and downsizes read names, discards all tags (including read groups). If the input bam doesn't have any reads, the output bam won't be written. Args: input_bam_path: input bam path output_bam_path: output bam path chrom: chromosome (eg. '1' or 'X') pos: minrep'ed variant position integer (eg. 12345) ref: minrep'ed ref allele (eg. 'A', 'ACT', etc.) alt: minrep'ed alt allele (eg. 'GCT', 'C', etc.) Return: 2-tuple with (is_empty, artificial_haplotype_counter) where is_empty: is True if the input_bam was empty artificial_haplotype_counter: the number of artificial haplotypes found in the input bam artificial_haplotypes_deleted_counter: the number of artificial haplotypes discarded because they overlap other artificial haplotypes in a way that might cause double-counting of reads. """ # compute variant start, end reference coords (half-open) variant_start = pos variant_end = pos + len(ref) # This counter is used as a sanity check that HC added at least one artificial haplotype (typically it adds # 2*n of these where n is the number of SNPs in the region). artificial_haplotype_counter = 0 # artificial haplotype coords are half-open (eg. (start=83, end=93) has length 10) union_of_artificial_haplotypes_that_overlap_variant = (1e9, 0) # union of genomic intervals spanned by artificial haplotypes that overlap the variant artificial_haplotypes_that_dont_overlap_variant = {} # maps each artificial haplotype id (eg. HC tag value) to the interval spanned by this artificial haplotype: (r.reference_start, r.reference_end) # iterate over the reads raw_reads = {} # maps each artificial haplotype id (eg. HC tag value) to the list of reads assigned to this haplotype (eg. 
that have this id in their HC tag) ibam = pysam.AlignmentFile(input_bam_path, "rb") for r in ibam: tags = dict(r.tags) haplotype_id = tags['HC'] if tags.get('RG') == "ArtificialHaplotype": # handle reads that are actually artificial haplotypes artificial_haplotype_counter += 1 # check whether the artificial haplotype overlaps the variant if r.reference_start >= variant_end or r.reference_end <= variant_start: # there's no overlap artificial_haplotypes_that_dont_overlap_variant[haplotype_id] = (r.reference_start, r.reference_end) else: union_of_artificial_haplotypes_that_overlap_variant = interval_union( (r.reference_start, r.reference_end), union_of_artificial_haplotypes_that_overlap_variant) else: # this is a regular read - save it, hashed by the haplotype_id of the haplotype that it was mapped to. if haplotype_id not in raw_reads: raw_reads[haplotype_id] = [] raw_reads[haplotype_id].append(r) artificial_haplotypes_deleted_counter = 0 if not raw_reads: is_bam_empty = True return (is_bam_empty, artificial_haplotype_counter, artificial_haplotypes_deleted_counter) # For each artificial haplotype that doesn't overlap the variant, check if it overlaps any of the artificial # haplotypes that do overlap the variant. If it does then discard all raw reads that map to it since these reads # cause bumps in the coverage plot due to double-counting of the overlapping reads. for haplotype_id, artificial_haplotype_that_doesnt_overlap_variant in artificial_haplotypes_that_dont_overlap_variant.items(): if haplotype_id not in raw_reads: continue # skip haplotypes that have no reads mapped to them (this does happen) if do_intervals_intersect( artificial_haplotype_that_doesnt_overlap_variant, union_of_artificial_haplotypes_that_overlap_variant): # intersection found, so delete all reads mapping to this haplotype that doesn't overlap the variant artificial_haplotypes_deleted_counter += 1 del raw_reads[haplotype_id] # sanity check if not raw_reads: is_bam_empty = True return (is_bam_empty, artificial_haplotype_counter, artificial_haplotypes_deleted_counter) #assert artificial_haplotype_counter > 0, \ # "Expected HaplotypeCaller to add at least one record with " \ # "RG == 'ArtificialHaplotype'. " \ # "%(input_bam_path)s => %(output_bam_path)s" % locals() if artificial_haplotypes_deleted_counter > 0: logging.info(("post-processing: discarded %(artificial_haplotypes_deleted_counter)d out of " "%(artificial_haplotype_counter)d artificial haplotypes") % locals()) # write out the bam reference_sequences = [] for reference_id in range(len(ibam.header['SQ'])): d = {} reference_sequences.append(d) for key, value in ibam.header['SQ'][reference_id].items(): if key in ["SN", "LN"]: d[key] = value header = {'HD': { 'VN': '1.4', 'SO': 'coordinate' }, 'SQ': reference_sequences, 'RG': [], } is_bam_empty = True obam = pysam.AlignmentFile(output_bam_path, "wb", header=header) for hc, reads in raw_reads.items(): for r in reads: # copy info from r to s s = pysam.AlignedSegment() s.query_name = read_name_hash(r.query_name) s.query_sequence = r.query_sequence s.flag = r.flag s.reference_id = r.reference_id # since the bam should only have reads from one chromosome, there will always be just 1 chromosome entry in the header, and so this reference_id can always be 0. 
s.reference_start = r.reference_start s.mapping_quality = r.mapping_quality s.cigar = r.cigar s.next_reference_id = r.next_reference_id s.next_reference_start = r.next_reference_start s.template_length = r.template_length s.query_qualities = r.query_qualities obam.write(s) is_bam_empty = False if obam is not None: obam.close() return (is_bam_empty, artificial_haplotype_counter, artificial_haplotypes_deleted_counter) if __name__ == "__main__": p = argparse.ArgumentParser("Takes an HC output bam and discards non-essential header fields and tags, obfuscates read names, etc.") p.add_argument("-i", "--input-bam", help=".bam output from HaplotypeCaller", required=True) p.add_argument("-o", "--output-bam", help="Postprocessed bam", required=True) args = p.parse_args() match = re.search('([0-9XY]{1,2})-([0-9]{1,9})-([ACGTN]+)-([ACGTN]+)', args.input_bam) chrom = match.group(1) pos = int(match.group(2)) ref = match.group(3) alt = match.group(4) postprocess_bam(args.input_bam, args.output_bam, chrom, pos, ref, alt)
macarthur-lab/exac_readviz_scripts
utils/postprocess_reassembled_bam.py
Python
mit
8315
[ "pysam" ]
9d94fc017ca78bc51d60029d3b8fa04782628672897dc49956fa6a818f78d4aa
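The docstrings in postprocess_reassembled_bam.py above rely on a half-open interval convention for haplotype coordinates. A small standalone sketch, reusing two of the helpers verbatim with invented coordinates and a hypothetical read name, shows what that convention implies for the overlap test and for read-name obfuscation:

# Half-open interval helpers, copied from the script above.
def interval_union(a, b):
    return min(a[0], b[0]), max(a[1], b[1])

def do_intervals_intersect(a, b):
    return a[0] < b[1] and a[1] > b[0]

# (100, 200) and (200, 300) only touch at the boundary, so under the half-open
# convention they do not intersect, although their union is still contiguous.
assert not do_intervals_intersect((100, 200), (200, 300))
assert do_intervals_intersect((100, 250), (200, 300))
assert interval_union((100, 200), (150, 300)) == (100, 300)

# Read names are obfuscated to at most nine decimal digits.
def read_name_hash(read_name):
    return str(abs(hash(read_name)) % 10**9)

name = read_name_hash("HWI-ST1133:217:C1DFCACXX:2:1101")  # hypothetical read name
print(len(name) <= 9)  # True; the actual digits vary per run with hash randomisation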
import numpy as np import cv2 from cv2 import * from matplotlib import pyplot as plt import time import rospy import std_msgs from sensor_msgs import point_cloud2 import sensor_msgs import math ################################################################################################### ## Chicago Engineering Design Team ## Line Detection using Python OpenCV for autonomous robot Scipio (IGVC competition) ## @author Basheer Subei ## @email basheersubei@gmail.com ####################################################### ## line detection overview as of 3/27/2014: ## ## main procedure: ## 1. GUI elements created ## 2. image rendered for first time ## 3. wait for user key and render image if trackbars change ## ## Rendering: ## 1. obtain image and region of interest (ROI) ## 2. mask out green areas using hsv filter (uses absolute values, not relative yet) ## 3. gaussian blur (removes high-frequency noise) ## 4. global threshold (depends on brightness or something) ## 5. equalize histogram (not adaptively) ## 6. adaptive threshold ## 7. display image ## ## TODO conform to PEP8 style standard and consider using Flake8 to check it. ########################################################################################################################### global_threshold = 160 global_threshold_factor = 2 adaptive_threshold_block_size = 101 adaptive_threshold_C = 30 blur_size = 49 # canny_threshold = 100 max_erode_iterations = 100 bandpass_low_cutoff = 1 bandpass_high_cutoff = 30 #hsv threshold variables hue_low = 20 hue_high = 50 saturation_low = 0 saturation_high = 255 value_low = 0 value_high = 255 file_number = 1 backprojection_threshold = 50 use_cam = False ## initialize webcam cam = VideoCapture() def nothing(x): pass # converts pixels from camera space to 3-d space ## TODO take all these constants from ROS parameters def publish_line_data(camera_image, horizon = 0) : # get indices of nonzero elements (pixels that are lines) pixels = np.transpose(camera_image.nonzero()) #pixels contains the line obstacle pixels that need to be converted to PCL #height = height of the camera from the ground in meters height = 0.7 orien = 65 # 90 - 25 #orien = orientation of the camera (pitch angle from vertical) ##TODO find dimensions dynamically image_width = camera_image.shape[0] image_height = camera_image.shape[1] horAngl = 120.0 #field of view verAngl = 120.0 * 9.0 / 16.0 #field of view #reference horizon that will be used in calculations. refHor = image_height - horizon #differnce in angle that each pixel corresponds to dAV = verAngl / image_height #create the cloud # cloud = sensor_msgs.msg.PointCloud(rospy.Time.now(), pixels, 0) # cloud = sensor_msgs.msg.PointCloud2() #create the header # cloud.header.stamp = rospy.Time.now() # cloud.header.frame_id = "sensor_frame" # cloud.channels.resize(1) #sets the size of the cloud. 
This was need in c++ so i think it will be needed in python # cloud.resize(len(pixels)) # cloud.set_points_size(len(pixels)) cloud_points = np.array([[0 for i in range(0,3)] for j in range(len(pixels))], dtype = float) # print "length of cloud points is ", len(cloud.points) # print pixels[0][0] #if there is no horizon argument passed into the function it will default to this method if horizon == 0 : for i in range(0,len(pixels)) : temp = height * math.tan((orien - (verAngl / 2) + (image_height - pixels[i][1])*dAV) * math.pi / 180.0) cloud_points[i][0] = (float(pixels[i][0] - (image_width / 2)) / (image_width / 2)) * temp * math.tan(horAngl * math.pi / 180.0) cloud_points[i][1] = temp cloud_points[i][2] = 0.0 else : #reference horizon that will be used in calculations refHor = image_height - horizon #fills the cloud with the line data for i in range(0,len(pixels)) : temp = height * math.tan( (90 - (refHor - abs(pixels[i][1] - image_height)) * dAV) * math.pi / 180.0) cloud_points[i][0] = (float(pixels[i][0] - (image_width / 2)) / (image_width / 2)) * temp * math.tan(horAngl * math.pi / 180.0) cloud_points[i][1] = temp cloud_points[i][2] = 0.0 # cloud_header = std_msgs.header.Header() print cloud_points cloud = sensor_msgs.msg.PointCloud2() cloud = sensor_msgs.point_cloud2.create_cloud_xyz32(cloud.header, cloud_points) cloud.header.frame_id = "base_camera" linePub.publish(cloud) return def render_image(): start_time = time.time() if use_cam: success = True if not cam.isOpened(): success = cam.open(0) if success: ok, img = cam.read() else: file_name = 'parking_test' + str(file_number) + '.jpg' img = cv2.imread(file_name,1) # read in image (from file) ##TODO check if file doesnt exist # our region of interest is only in bottom half of image horizon = img.shape[0]/2 roi = img[horizon:, :] gray_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY) # convert BGR image (by default using imshow) to GRAY # roi = gray_image # note: GIMP color picker has ranges for HSV from 0 to 360 (H), 100 (S), and 100 (V) # need to convert those ranges into opencv HSV ranges 179 (H), 255 (S), and 255 (V). ## use green color in HSV to mask out all the uninteresting points # Convert BGR to HSV hsv = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV) # make HSV histogram and look for regions with lots of green Hue. For those regions find their range of HSV so that they can be masked out # Grass Hue ranges from 20:50 , Saturation ranges from 50:125 , Value ranges from 0:150. # White lines Hue ranges from 120:150 , Saturation ranges from 0:50 , Value ranges from 150:255. # all above values are in opencv HSV ranges # note that Values depend on overall brightness (need to use adaptive method or dynamic one). 
backprojection_training = cv2.imread('training_for_backprojection_1.png') backprojection_training = cv2.cvtColor(backprojection_training, cv2.COLOR_BGR2HSV) # begin HISTOGRAM BACKPROJECTION # calculating object histogram roihist = cv2.calcHist([backprojection_training],[0, 1], None, [180, 256], [hue_low, hue_high, 0, 256] ) # plt.hist(roihist.ravel(),256,[40,256]); plt.show() #roi histogram (starting from value 1, skipping zero values) # normalize histogram and apply backprojection cv2.normalize(roihist,roihist,1,255,cv2.NORM_MINMAX) # plt.hist(roihist.ravel(),256,[40,256]); plt.show() #roi histogram (starting from value 1, skipping zero values) dst = cv2.calcBackProject([hsv],[0,1],roihist,[hue_low,hue_high,0,256],1) # Now convolute with circular disc disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5)) cv2.filter2D(dst,-1,disc,dst) # do we need this convolution??? # invert dst (because the backprojection chooses what we DON'T want) dst = 255 - dst # threshold the backprojection stuff to only grab the more probable ones ret,thresh = cv2.threshold(dst,backprojection_threshold,0,cv2.THRESH_TOZERO) # AND the remaining backprojection pixels with the original gray image (we will only use gray so far so # we don't need to use BGR or HSV. If we did, then we could've merged thresh into a 3-channel image then AND'ed # with our original BGR or HSV) after_backprojection = cv2.bitwise_and(gray_roi, thresh) # cv2.imshow('backprojection_matrix', thresh) cv2.imshow('backprojection_result', after_backprojection) ## end HISTOGRAM BACKPROJECTION # TODO actually connect backprojection output to rest of filter # TODO consider revising bitwise AND to a weighting algorithm (because it's too exclusive, we might miss good points) ## begin hsv threshold # define range of blue color in HSV # lower_grass = np.array([hue_low, saturation_low, value_low]) # upper_grass = np.array([hue_high, saturation_high, value_high]) # # Threshold the HSV image to get only blue colors # mask = cv2.inRange(hsv, lower_grass, upper_grass) # mask_inv = 255-mask #invert the mask (because the mask selected the parts we did NOT want) # # Bitwise-AND mask and original image # after_hsv = cv2.bitwise_and(gray_roi, gray_roi, mask=mask_inv) ## end hsv threshold #roi = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY) ## abandoning bandpass filter because I couldn't figure out how to convert the float np array back to meaningful int arrays to use it for the mask ## and yet, opencv can display those float np arrays just fine... ??? 
## testing out bandpass filter thingie (from fourier transform page on opencvPython tut) # dft = cv2.dft(np.float32(roi),flags = cv2.DFT_COMPLEX_OUTPUT) # dft_shift = np.fft.fftshift(dft) # rows, cols = roi.shape # crow,ccol = rows/2 , cols/2 # #TODO improve mask for bandpass filter # # create a mask (square with a smaller square hole in it) # mask = np.zeros((rows,cols,2),np.uint8) # mask[crow-bandpass_high_cutoff:crow+bandpass_high_cutoff, ccol-bandpass_high_cutoff:ccol+bandpass_high_cutoff] = 1 # mask[crow-bandpass_low_cutoff:crow+bandpass_low_cutoff, ccol-bandpass_low_cutoff:ccol+bandpass_low_cutoff] = 0 # # apply mask and inverse DFT # fshift = dft_shift*mask # f_ishift = np.fft.ifftshift(fshift) # img_back = cv2.idft(f_ishift) # img_back = cv2.magnitude(img_back[:,:,0],img_back[:,:,1]) # cv2.imshow('afterBandpass', img_back) # # use bandpass filter as mask (only include pixels that are part of bandpass) multiplying img_back and roi # print img_back # print img_back.dtype # img_back = img_back.astype(int) * 256 # convert the bandpass mask from float32 to uint8 # print img_back # print img_back.dtype # img_back = cv2.bitwise_not(img_back) # invert the image (to make the mask match the stuff within bandpass frequencies) # cv2.imshow('afterBandpass2', img_back) # masked_bandpass = cv2.bitwise_and(img_back, roi) # plot the histogram for analysis #plt.hist(gray_image.ravel(),256,[0,256]); plt.show() # original image histogram #plt.hist(roi.ravel(),256,[0,256]); plt.show() #roi histogram blur = cv2.GaussianBlur(after_backprojection, (blur_size, blur_size), 0) # perform gaussian blur on grayscale image # blur = cv2.medianBlur(roi, blur_size) # perform median blur on grayscale image # blur = cv2.bilateralFilter(roi,blur_size,150,150) # global threshold (to zero out below threshold and leave other stuff as is) # first returned object is ignored # find (normalized to 1) mean of image brightness normalized_brightness = cv2.mean(gray_roi)[0] / 255 print "normalized brightness: ", normalized_brightness retval, global_thresh = cv2.threshold(blur, global_threshold * normalized_brightness * global_threshold_factor, 0, cv2.THRESH_TOZERO) # equalize histogram (globally) equ = cv2.equalizeHist(global_thresh) # TODO fix up histogram (maybe too many 0-value pixels in the histogram that skew it?) 
## CLAHE not available before OpenCV 3.0 # perform CLAHE (adaptive histogram equalization) #clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8)) #equ = clahe.apply(roi) #plt.hist(equ.ravel(),256,[1,256]); plt.show() #roi histogram (starting from value 1, skipping zero values) #testing Laplacian filter (not useful) # laplacian = cv2.Laplacian(equ,cv2.CV_64F) # cv2.imshow('laplacian', laplacian) # perform adaptive threshold cv2.imshow('before adaptive threshold', equ) thresh = cv2.adaptiveThreshold(equ, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, adaptive_threshold_block_size, adaptive_threshold_C - 30) cv2.imshow("after adaptive threshold", thresh) ## DEBUG: prints out side by side for comparison (analyzing effect of histogram equalization) # res = np.hstack((roi,equ)) #stacking images side-by-side ## skeletonize image count = 0 size = np.size(thresh) skel = np.zeros(thresh.shape,np.uint8) element = cv2.getStructuringElement(cv2.MORPH_CROSS,(3,3)) done = False # iteratively erode, dilate, subtract, then OR the image until it's 1 pixel thick while(not done and count < 50 + max_erode_iterations): eroded = cv2.erode(thresh,element) temp = cv2.dilate(eroded,element) temp = cv2.subtract(thresh,temp) skel = cv2.bitwise_or(skel,temp) thresh = eroded.copy() zeros = size - cv2.countNonZero(thresh) if zeros==size: done = True count = count + 1 # canny_image = cv2.Canny(equ, canny_threshold, canny_threshold*2) # perform canny edge detection on blurred image # contours, hierarchy = cv2.findContours(equ, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) # find contours from canny image # cv2.drawContours(equ, contours, -1, (255,255,0), 3) # draws contours on canny image end_time = time.time() print "time elapsed: ", (end_time - start_time) final_image = skel # show images using gui cv2.imshow('original', roi) # cv2.imshow('gray_roi', gray_roi) cv2.imshow('final image', final_image) ## TODO extract line_data from final_image return final_image ## end render_image() ## start main procedure # initialize ROS stuff ## TODO find out what name the topic should have linePub = rospy.Publisher('line_data',sensor_msgs.msg.PointCloud2) rospy.init_node('line_detection_GUI', anonymous=True) # create GUI elements (windows and trackbars) # cv2.namedWindow('gray_roi', cv2.WINDOW_AUTOSIZE) # create a window for gray image cv2.namedWindow('final', cv2.WINDOW_AUTOSIZE) # create a window for canny image # cv2.namedWindow('laplacian', cv2.WINDOW_AUTOSIZE) # create a window for laplacian image # cv2.namedWindow('afterBandpass', cv2.WINDOW_AUTOSIZE) # create a window for gray image # cv2.createTrackbar('canny_threshold_trackbar', 'final', canny_threshold, canny_threshold*3, nothing) # create a trackbar for canny threshold # cv2.createTrackbar('bandpass_low_cutoff_trackbar', 'final', bandpass_low_cutoff, 100, nothing) # create a trackbar for low bandpass # cv2.createTrackbar('bandpass_high_cutoff_trackbar', 'final', bandpass_high_cutoff, 1000, nothing) # create a trackbar for high bandpass cv2.createTrackbar('adaptive_threshold_block_size_trackbar', 'final', adaptive_threshold_block_size, 300, nothing) # create a trackbar for adaptive threshold block size # cv2.createTrackbar('adaptive_threshold_C_trackbar', 'final', adaptive_threshold_C, 100, nothing) # create a trackbar for adaptive threshold C value (just an offset) cv2.createTrackbar('blur_size_trackbar', 'final', blur_size, 101, nothing) # create a trackbar for blur size cv2.createTrackbar('global_threshold_trackbar', 'final', global_threshold, 255, nothing) # create a 
trackbar for global threshold cv2.createTrackbar('hue_low_trackbar', 'final', hue_low, 179, nothing) # create a trackbar for hue_low (hsv threshold) cv2.createTrackbar('hue_high_trackbar', 'final', hue_high, 179, nothing) # create a trackbar for hue_high (hsv threshold) # cv2.createTrackbar('saturation_low_trackbar', 'final', saturation_low, 255, nothing) # create a trackbar for saturation_low (hsv threshold) # cv2.createTrackbar('saturation_high_trackbar', 'final', saturation_high, 255, nothing) # create a trackbar for saturation_high (hsv threshold) # cv2.createTrackbar('value_low_trackbar', 'final', value_low, 255, nothing) # create a trackbar for value_low (hsv threshold) # cv2.createTrackbar('value_high_trackbar', 'final', value_high, 255, nothing) # create a trackbar for value_high (hsv threshold) cv2.createTrackbar('file_number_trackbar', 'final', file_number, 7, nothing) # create a trackbar for file_number threshold cv2.createTrackbar('backprojection_threshold_trackbar', 'final', backprojection_threshold, 255, nothing) # create a trackbar for backprojection threshold # render the image for the first time line_data = render_image() publish_line_data(line_data) # wait for user key (such as arrow keys) and render image again if trackbars change; quit upon ESC hit. while 1: k = cv2.waitKey(0) & 0xFF if k == 27: # wait for ESC key to exit break # # if canny threshold (trackbar) has changed, render image again # if (canny_threshold != cv2.getTrackbarPos('canny_threshold_trackbar', 'final')): # canny_threshold = cv2.getTrackbarPos('canny_threshold_trackbar', 'final') # # make sure value is greater than zero # canny_threshold = canny_threshold + 1 if canny_threshold == 0 else canny_threshold # render_image() # else if adap. block size (trackbar) has changed, render image again if (adaptive_threshold_block_size != cv2.getTrackbarPos('adaptive_threshold_block_size_trackbar', 'final')): adaptive_threshold_block_size = cv2.getTrackbarPos('adaptive_threshold_block_size_trackbar', 'final') # make sure value is odd and more than 2 adaptive_threshold_block_size = adaptive_threshold_block_size + 1 if adaptive_threshold_block_size % 2 == 0 else adaptive_threshold_block_size adaptive_threshold_block_size = adaptive_threshold_block_size + 2 if adaptive_threshold_block_size == 1 else adaptive_threshold_block_size line_data = render_image() publish_line_data(line_data) # # else if adap. 
block size (trackbar) has changed, render image again # if (adaptive_threshold_C != cv2.getTrackbarPos('adaptive_threshold_C_trackbar', 'final')): # adaptive_threshold_C = cv2.getTrackbarPos('adaptive_threshold_C_trackbar', 'final') # render_image() # else if blur size (trackbar) has changed, render image again if (blur_size != cv2.getTrackbarPos('blur_size_trackbar', 'final')): blur_size = cv2.getTrackbarPos('blur_size_trackbar', 'final') # make sure value is odd and more than 2 blur_size = blur_size + 1 if blur_size % 2 == 0 else blur_size blur_size = blur_size + 2 if blur_size == 1 else blur_size line_data = render_image() publish_line_data(line_data) # else if global threshold (trackbar) has changed, render image again if (global_threshold != cv2.getTrackbarPos('global_threshold_trackbar', 'final')): global_threshold = cv2.getTrackbarPos('global_threshold_trackbar', 'final') line_data = render_image() publish_line_data(line_data) # no need for global_threshold value check (all values are valid) # if (bandpass_low_cutoff != cv2.getTrackbarPos('bandpass_low_cutoff_trackbar', 'final')): # bandpass_low_cutoff = cv2.getTrackbarPos('bandpass_low_cutoff_trackbar', 'final') # render_image() # if (bandpass_high_cutoff != cv2.getTrackbarPos('bandpass_high_cutoff_trackbar', 'final')): # bandpass_high_cutoff = cv2.getTrackbarPos('bandpass_high_cutoff_trackbar', 'final') # # set high value to be at least low value # bandpass_high_cutoff = bandpass_low_cutoff if bandpass_high_cutoff < bandpass_low_cutoff else bandpass_high_cutoff # render_image() # else if hue_low (trackbar) has changed, render image again if (hue_low != cv2.getTrackbarPos('hue_low_trackbar', 'final')): hue_low = cv2.getTrackbarPos('hue_low_trackbar', 'final') line_data = render_image() publish_line_data(line_data) # else if hue_high (trackbar) has changed, render image again if (hue_high != cv2.getTrackbarPos('hue_high_trackbar', 'final')): hue_high = cv2.getTrackbarPos('hue_high_trackbar', 'final') line_data = render_image() publish_line_data(line_data) # # else if saturation_low (trackbar) has changed, render image again # if (saturation_low != cv2.getTrackbarPos('saturation_low_trackbar', 'final')): # saturation_low = cv2.getTrackbarPos('saturation_low_trackbar', 'final') # render_image() # # else if saturation_high (trackbar) has changed, render image again # if (saturation_high != cv2.getTrackbarPos('saturation_high_trackbar', 'final')): # saturation_high = cv2.getTrackbarPos('saturation_high_trackbar', 'final') # render_image() # # else if value_low (trackbar) has changed, render image again # if (value_low != cv2.getTrackbarPos('value_low_trackbar', 'final')): # value_low = cv2.getTrackbarPos('value_low_trackbar', 'final') # render_image() # # else if value_high (trackbar) has changed, render image again # if (value_high != cv2.getTrackbarPos('value_high_trackbar', 'final')): # value_high = cv2.getTrackbarPos('value_high_trackbar', 'final') # render_image() # else if backprojection_threshold (trackbar) has changed, render image again if (backprojection_threshold != cv2.getTrackbarPos('backprojection_threshold_trackbar', 'final')): backprojection_threshold = cv2.getTrackbarPos('backprojection_threshold_trackbar', 'final') line_data = render_image() publish_line_data(line_data) # else if file_number (trackbar) has changed, render image again if (file_number != cv2.getTrackbarPos('file_number_trackbar', 'final')): file_number = cv2.getTrackbarPos('file_number_trackbar', 'final') file_number = 1 if file_number==0 else 
file_number line_data = render_image() publish_line_data(line_data) # destroy all objects cv2.destroyAllWindows()
wfriedl/IGVC_Scipio_Software
line_detection/src/snippets/GUI_test.py
Python
gpl-2.0
21715
[ "Gaussian" ]
883e3ff9435d54fd1c212368be4a7ebdf25bdef81aea446fd91de3c44fbab8cd
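The GUI script above interleaves its image processing with trackbar handling and ROS publishing. As an illustration only, here is a condensed sketch of the core filter chain from render_image() (grayscale ROI, Gaussian blur, brightness-scaled global threshold, histogram equalisation, adaptive threshold, morphological skeletonisation). It deliberately omits the HSV backprojection stage, and the default parameter values simply mirror the module-level globals above; it is not the author's exact pipeline.

import cv2
import numpy as np

def detect_lines(bgr_image, blur_size=49, block_size=101, c_offset=0,
                 global_threshold=160, global_threshold_factor=2,
                 max_erode_iterations=100):
    """Condensed filter chain: ROI -> blur -> brightness-scaled global
    threshold -> equalize -> adaptive threshold -> skeletonize."""
    roi = bgr_image[bgr_image.shape[0] // 2:, :]          # bottom half only
    gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (blur_size, blur_size), 0)

    # scale the global threshold by the mean brightness of the ROI
    normalized_brightness = cv2.mean(gray)[0] / 255.0
    _, thresh = cv2.threshold(
        blur, global_threshold * normalized_brightness * global_threshold_factor,
        0, cv2.THRESH_TOZERO)
    equ = cv2.equalizeHist(thresh)
    binary = cv2.adaptiveThreshold(equ, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                   cv2.THRESH_BINARY, block_size, c_offset)

    # erode/dilate/subtract/OR until the remaining lines are one pixel thick
    element = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
    skel = np.zeros_like(binary)
    for _ in range(max_erode_iterations):
        eroded = cv2.erode(binary, element)
        opened = cv2.dilate(eroded, element)
        skel = cv2.bitwise_or(skel, cv2.subtract(binary, opened))
        binary = eroded
        if cv2.countNonZero(binary) == 0:
            break
    return skel

In the original script, the nonzero pixels of the returned skeleton are what publish_line_data() converts into PointCloud2 coordinates.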
#!/usr/bin/env/ python3 """ This is the main point of access for the time-parallel implementation of the APinT method. I/O is handled through the cyclops_control module and pickled dicts, respectively. Functions --------- - `main` -- Exposes the high-level implementation Invocation ---------- See the cyclops_control docs for details of parameters. The test directory contains example calls to Cyclops. Dependencies ------------ - `cyclops suite` - numpy - pyfftw - mpi4py | Author: Adam G. Peddle, Terry Haut | Contact: ap553@exeter.ac.uk | Version: 1.0 """ import numpy as np import sys import pickle import os import cyclops_control import cyclops_base import rswe_direct from spectral_toolbox import SpectralToolbox from rswe_exponential_integrator import * from mpi4py import MPI def main(control): """ Main program. Exposes the Parareal algorithm. Sub-algos are encapsulated in the rswe_direct and rswe_exponential_integrator modules. All control is through the control object. """ if 'working_dir' in control: os.chdir(control['working_dir']) # Set up MPI communicator global comm comm = MPI.COMM_WORLD rank = comm.Get_rank() size = comm.Get_size() # Local parameterisations: if control['outFileStem'] is None: control['outFileStem'] = '' conv_tol = control['conv_tol'] control['final_time'] = control['coarse_timestep'] # For generalisabilty of rswe_direct control['solver'] = None # Idiot-proofing control['Nt'] = size # One coarse timestep per process # Initialise spectral toolbox object st = SpectralToolbox(control['Nx'], control['Lx']) control['HMM_M_bar'] = max(25, int(80*control['HMM_T0'])) # Exponential integrators at different Parareal levels # may be different, e.g. in the case of triple scale # separation. expInt_coarse = ExponentialIntegrator_FullEqs(control) expInt_fine = ExponentialIntegrator_FullEqs(control) # Set up initial (truth) field if control['filename']: XX, YY, ICs = cyclops_base.read_ICs(control, control['filename']) else: # Fall back on default initial Gaussian XX, YY, ICs = cyclops_base.h_init(control) control['filename'] = 'default' st = SpectralToolbox(control['Nx'], control['Lx']) # Process-local representations of macroscopic, microscopic, and previous iterand U_hat_mac_local = np.zeros((3, control['Nx'], control['Nx']), dtype = 'complex') U_hat_mic_local = np.zeros((3, control['Nx'], control['Nx']), dtype = 'complex') U_hat_old_local = np.zeros((3, control['Nx'], control['Nx']), dtype = 'complex') converged = np.zeros((1),dtype=int) # U_hat_mac contains the solution at the completion of the # coarse parareal timesteps. # U_hat_mic contains the solution at the completion of the fine # parareal timesteps, but discards the information in between # (i.e. matches the timesteps of the coarse solution) U_hat_mac = np.zeros((control['Nt'] + 1, 3, control['Nx'], control['Nx']), dtype = 'complex') U_hat_mic = np.zeros((control['Nt'] + 1, 3, control['Nx'], control['Nx']), dtype = 'complex') U_hat_new = np.zeros((control['Nt'] + 1, 3, control['Nx'], control['Nx']), dtype = 'complex') U_hat_old = np.zeros((control['Nt'] + 1, 3, control['Nx'], control['Nx']), dtype = 'complex') if rank == 0: # Root node by convention # Create initial condition U_hat_new[0,:,:,:] = ICs for k in range(3): U_hat_new[0,k,:,:] = st.forward_fft(U_hat_new[0,k,:,:]) U_hat_old[0,:,:,:] = U_hat_new[0,:,:,:] # Compute first parareal level here # TODO: Parallelise this step. It's embarassingly parallel, but # care must be taken not to interfere with the time-parallel # coarse solves. 
for j in range(control['Nt']): # First parareal level by coarse timestep in serial only U_hat_new[j+1, :, :, :] = rswe_direct.solve('coarse_propagator', control, st, expInt_coarse, U_hat_new[j,:,:,:], invert_fft = False) U_hat_old[j+1, :, :, :] = U_hat_new[j+1, :, :, :] # Further parareal levels computed here k = 0 acc_err = 0. comm.Barrier() while converged[0] == 0: #Scatter from 0 to all local olds comm.Scatter(np.ascontiguousarray(U_hat_old[:-1, :,:,:]), U_hat_old_local, root = 0) # Compute coarse and fine timesteps (parallel) # Average computed in serial. It is parallelisable, # but ideally this would be on a heterogeneous # computing architecture. U_hat_mac_local = rswe_direct.solve('coarse_propagator', control, st, expInt_coarse, U_hat_old_local, invert_fft = False) U_hat_mic_local = rswe_direct.solve('fine_propagator', control, st, expInt_fine, U_hat_old_local, invert_fft = False) #Gather from all local macs and mics to root process comm.Gather(np.ascontiguousarray(U_hat_mic_local), U_hat_mic[1:,:,:,:], root = 0) comm.Gather(np.ascontiguousarray(U_hat_mac_local), U_hat_mac[1:,:,:,:], root = 0) if rank == 0: U_hat_new = np.zeros((control['Nt'] + 1, 3, control['Nx'], control['Nx']), dtype = 'complex') U_hat_new[0, :, :, :] = U_hat_old[0, :, :, :] for j in range(control['Nt']): # Loop over timesteps # Compute and apply Parareal correction (serial) U_hat_new[j+1, :, :, :] = rswe_direct.solve('coarse_propagator', control, st, expInt_coarse, U_hat_new[j,:,:,:], invert_fft = False) U_hat_new[j+1, :, :, :] = U_hat_new[j+1, :, :, :] + (U_hat_mic[j+1, :, :, :] - U_hat_mac[j+1, :, :, :]) # L_inf acc_err = max(acc_err, cyclops_base.compute_L_infty_error(U_hat_old[j+1,:,:,:], U_hat_new[j+1,:,:,:], st)) # Perform convergence checks (iterative error) U_hat_old[:, :, :, :] = U_hat_new[:, :, :, :].copy() #Overwrite previous solution for convergence tests if k > 0 and acc_err < conv_tol: print('Converged with acc_err {}'.format(acc_err)) converged[0] = 1 else: print('Not converged with acc_err {}'.format(acc_err)) converged[0] = 0 acc_err = 0. comm.Bcast(converged, root = 0) comm.Barrier() k+=1 # Post-convergence, handle output if rank == 0: for i in range(control['Nt'] + 1): with open("{}{}_APinT_{}.dat".format(control['filename'], control['outFileStem'], i), 'wb') as f: for k in range(3): U_hat_new[i,k,:,:] = st.inverse_fft(U_hat_new[i,k,:,:]) data = dict() data['time'] = i*control['final_time'] data['u'] = U_hat_new[i, 0, :, :] data['v'] = U_hat_new[i, 1, :, :] data['h'] = cyclops_base.inv_geopotential_transform(control, U_hat_new[i, 2, :, :]) data['control'] = control pickle.dump(data, f) if __name__ == "__main__": control_in = cyclops_control.setup_control(sys.argv[1:]) main(control_in)
AGPeddle/Cyclops
source/cyclops.py
Python
mit
7911
[ "Gaussian" ]
b34cd63195526b540f4da3d19cacb534cb5fe01a26170b37d16d4cfb427c4f2e
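cyclops.py above implements the Parareal predictor-corrector with the coarse sweep kept serial on rank 0 and the fine solves scattered over MPI ranks. As a purely illustrative reference (no MPI, no spectral solver), a serial numpy sketch of the same iteration might look like the following, with coarse() and fine() standing in for the coarse_propagator and fine_propagator solves:

import numpy as np

def parareal(u0, n_steps, coarse, fine, tol=1e-8, max_iter=50):
    """Serial Parareal: coarse() and fine() both advance one coarse time slice."""
    # zeroth iteration: a plain coarse sweep, as in the rank-0 loop above
    u = [np.asarray(u0, dtype=float)]
    for j in range(n_steps):
        u.append(coarse(u[j]))

    for _ in range(max_iter):
        u_old = [v.copy() for v in u]
        acc_err = 0.0
        for j in range(n_steps):
            # predictor-corrector update: a new coarse solve plus the
            # (fine - coarse) correction evaluated on the previous iterate
            u[j + 1] = coarse(u[j]) + fine(u_old[j]) - coarse(u_old[j])
            acc_err = max(acc_err, np.max(np.abs(u[j + 1] - u_old[j + 1])))
        if acc_err < tol:   # L_infty iterative-error test, as in cyclops.py
            break
    return u

# toy usage: du/dt = -u, coarse = one Euler step, fine = 100 Euler sub-steps
dt = 0.1
coarse = lambda v: v * (1.0 - dt)
fine = lambda v: v * (1.0 - dt / 100.0) ** 100
trajectory = parareal(np.array([1.0]), n_steps=10, coarse=coarse, fine=fine)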
try: from paraview import vtk except: import vtk try: from paraview import numpy_support except: from vtk.util import numpy_support #partie pour s'adapter aux version 5. et 6. de VTK try: from paraview.vtk import vtkFiltersExtraction from paraview.vtk import vtkFiltersVerdict from paraview.vtk import vtkFiltersGeneral from paraview.vtk import vtkCommonTransforms from paraview.vtk import vtkFiltersGeometry except: import vtk as vtkFiltersExtraction import vtk as vtkFiltersVerdict import vtk as vtkFiltersGeneral import vtk as vtkCommonTransforms import vtk as vtkFiltersGeometry from scipy import interpolate import struct import numpy import numpy.ma as ma import copy import os, glob, sys, time #________________________________________________________________________________ def vtk_set_input(filtre, input): """fonction de merde simplement pour la compatibilite a supprimer plus tard... """ try: filtre.SetInputData(input) except: filtre.SetInput(input) return 0 #________________________________________________________________________________ #________________________________________________________________________________ def lire_v3d(acces_fichier, fmt_fichier= "bin", endian= "big" , \ precision = 'i4r8', compter_saut_de_ligne=False): """fonction de lecture d'un fichier v3d Retourne un dictionnaire contenant - 'dims' : dimensions du bloc v3d lu - 'data' : dictionnaire contenant les donnees lues les numpy arrays ne sont pas mis en forme si utilisation de numpy.reshape, attention a l'ordre d'empilement ce doit etre .reshape(dim_k, dim_j, dim_i) compter_saut_de_ligne ne devrait normalement pas etre utilise... Mais peut toujours servir si jamais ca plante sur un cas """ #verification de l'existence du fichier v3d #et ouverture try: fichier_v3d = open(acces_fichier, 'rU') if fmt_fichier=='fmt'\ else open(acces_fichier, 'rb') except IOError: raise IOError, "le fichier n'existe pas" return 1 else: print 'Lecture %s'%(acces_fichier) #dictionnaire de lecture binaire dictionnaire_fmt_binaire = {"i4":[4,"l"], "i8":[8,"q"], "r4":[4,"f"], "r8":[8,"d"], "big":">", "little":"<"} data = {} if fmt_fichier == "bin": #options de lecture pour struct.unpack ordre_bin = '>' if endian == 'big' else '<' fmt_entier = "l" if precision[:2] == 'i4' else 'q' taille_entier = int(precision[1]) fmt_reel = "f" if precision[-2:] == 'i4' else 'd' taille_reel = int(precision[3]) a_lire = struct.unpack(ordre_bin + 'l', fichier_v3d.read(4))[0] nb_vars = struct.unpack(ordre_bin + fmt_entier, fichier_v3d.read(a_lire))[0] if struct.unpack(ordre_bin + 'l', fichier_v3d.read(4))[0] != a_lire: raise Exception, 'erreur au cours de la lecture' for incr_var in xrange(nb_vars): a_lire = struct.unpack(ordre_bin + 'l', fichier_v3d.read(4))[0] nom_var = struct.unpack('20s', fichier_v3d.read(a_lire))[0] var=nom_var.replace('va','').replace(' ','') if struct.unpack(ordre_bin + 'l', fichier_v3d.read(4))[0] != a_lire: raise Exception, 'erreur au cours de la lecture' a_lire = struct.unpack(ordre_bin + 'l', fichier_v3d.read(4))[0] if nom_var[:2] == 'va': info_dims = struct.unpack(ordre_bin + '5' + fmt_entier, fichier_v3d.read(a_lire)) else: info_dims = struct.unpack(ordre_bin + '4' + fmt_entier, fichier_v3d.read(a_lire)) if struct.unpack(ordre_bin + 'l', fichier_v3d.read(4))[0] != a_lire: raise Exception, 'erreur au cours de la lecture' dims = numpy.asarray(info_dims[-3:]).astype("Int32") numbloc = info_dims[1 if nom_var[:2] == 'va' else 0] a_lire = struct.unpack(ordre_bin + 'l', fichier_v3d.read(4))[0] vars = struct.unpack(ordre_bin + 
str(int(a_lire / taille_reel)) + fmt_reel, fichier_v3d.read(a_lire)) vars = numpy.asarray(vars) if struct.unpack(ordre_bin + 'l', fichier_v3d.read(4))[0] != a_lire: raise Exception, 'erreur au cours de la lecture' data[var] = vars elif fmt_fichier == "fmt": nb_vars = int(fichier_v3d.readline().strip()) for var_iter in xrange(nb_vars): tampon = fichier_v3d.readline() nom_var = tampon[:20] length_format = int(tampon[20:40].split('e')[1].split('.')[0]) read = fichier_v3d.readline()[:-1] dims=[] for i in range(len(read) / 6): dims.append(int(read[6 * i: 6 * (i + 1)])) is_variable = (len(dims) == 5) numbloc = dims[1 if is_variable else 0] dims = numpy.asarray(dims[2:] if is_variable else dims[1:]) vars = fichier_v3d.read(length_format * dims.prod() + dims.prod() / 6 + ((dims.prod()%6) != 0)) # vars = fichier_v3d.read(length_format * dims.prod()) # vars = vars.replace('\n','') vars = vars.replace('\n',' ' if compter_saut_de_ligne else '') vars = numpy.fromstring(vars, numpy.dtype('|S' + str(length_format)), count = dims.prod()) vars = numpy.asarray(vars, dtype=numpy.dtype('float')).reshape(dims.prod(),1)[:,0] var=nom_var.replace('va','').replace(' ','') data[var] = vars fichier_v3d.close() OutputDictionnary = {'numbloc': numbloc, 'dims' : dims, 'data' : data} return OutputDictionnary #________________________________________________________________________________ #________________________________________________________________________________ def get_numeros_blocs_non_vides(vtkMultiBlockDataSet): """retourne le numero des blocs non-vides d'un MultiBlockDataset( """ list_of_blocks = [] for numbloc in range(vtkMultiBlockDataSet.GetNumberOfBlocks()): if vtkMultiBlockDataSet.GetBlock(numbloc) != None: if vtkMultiBlockDataSet.GetBlock(numbloc).GetNumberOfPoints() != 0: list_of_blocks.append(numbloc) return list_of_blocks #________________________________________________________________________________ #________________________________________________________________________________ def vtk_new_instance(vtkDataObject): """genere un nouvelle instance de la classe en faisant attention a ce qu'elle ne soit pas instanciee deux fois """ output = vtkDataObject.NewInstance() while(output.GetReferenceCount() > 1): output.UnRegister(None) return output #________________________________________________________________________________ #________________________________________________________________________________ def vtk_new_shallowcopy(vtkDataObject, mode_multibloc=True): """genere un nouvelle instance de la classe en faisant attention a ce qu'elle ne soit pas instanciee deux fois et copie superficielle de l'objet vtk fourni en entree si mode_multibloc, alors les blocs composant le multibloc sont eux aussi shallow copies """ # creation d'un nouvel objet de la meme classe output = vtkDataObject.NewInstance() while(output.GetReferenceCount() > 1): output.UnRegister(None) # ShallowCopy if isinstance(vtkDataObject, vtk.vtkMultiBlockDataSet) and mode_multibloc is True: for numbloc in get_numeros_blocs_non_vides(vtkDataObject): output.SetBlock(numbloc, vtk_new_shallowcopy(vtkDataObject.GetBlock(numbloc)) ) else: output.ShallowCopy(vtkDataObject) return output #________________________________________________________________________________ #______________________________________________________________________________________________________________________________ def ecrire_v3d(acces_fichier, dict_numpy_arrays, dimensions, numbloc=0, fmt_fichier = "bin",\ precision="i4r8", endian="big", type_maillage=False): 
"""Fonction d'ecriture des fichiers au format Voir3D (v3d). dict_numpy_arrays est un dictionnaire qui contient les variables a ecrire dimensions indique les dimensions qui doivent etre ecrites dans le fichier SOUS LA FORME D'UN TUPLE type_maillage conditionne si le fichier v3d ecrit est du type maillage ou donnees (en-tete differents : dans le cas d'une variable, "va " est rajoute devant le nom de la variable au moment de l'ecriture, et le numero de la variable est ecrit avant les dimensions) """ ## definition des dictionnaires: format_binaire = {"i4" : [4,"l"], "i8" : [8,"q"], "r4" : [4,"f"], "r8" : [8,"d"], "big":">", "little" : "<"} nb_vars = len(dict_numpy_arrays) if nb_vars == 0: print 'None to save' return 1 ## ouverture du fichier: try: fichier_v3d = open(acces_fichier,"w" if fmt_fichier == 'fmt' else "wb") except: raise IOError, "Le fichier ne peut pas etre ecrit" else: print 'Ecriture %s'%(acces_fichier) ## ecriture du fichier: if fmt_fichier == "fmt": fichier_v3d.write((5*" " + str(nb_vars))[-5:] + "\n") incr_var = 0 for nom_var in dict_numpy_arrays if not type_maillage else ['x', 'y', 'z']: numpyArray = dict_numpy_arrays[nom_var] while len(dimensions) < 3: dimensions += (1,) numpyArray = numpyArray.ravel() if not type_maillage: var = "va " + nom_var else: var = nom_var if type_maillage: fichier_v3d.write((var + 20*" ")[:20] + (20*" " + "6e15.7")[-20:] + "\n" ) fichier_v3d.write((6*" " + str(numbloc))[-6:] +\ (6*" " + str(dimensions[0]))[-6:] +\ (6*" " + str(dimensions[1]))[-6:] +\ (6*" " + str(dimensions[2]))[-6:] + "\n") else: fichier_v3d.write((var + 20*" ")[:20] + (20*" " + "6e14.7")[-20:] + "\n") fichier_v3d.write((6*" " + str(incr_var + 1))[-6:] +\ (6*" " + str(numbloc))[-6:] +\ (6*" " + str(dimensions[0]))[-6:] +\ (6*" " + str(dimensions[1]))[-6:] +\ (6*" " + str(dimensions[2]))[-6:] + "\n") incr_var += 1 incr = 0 if type_maillage: for value in numpyArray: incr += 1 fichier_v3d.write( '{0: 15.7E}{1}'.format(value, "\n" if incr%6 == 0 or incr == numpyArray.size else "")) else: for value in numpyArray: incr += 1 fichier_v3d.write( '{0: 14.7E}{1}'.format(value, "\n" if incr%6 == 0 or incr == numpyArray.size else "")) elif fmt_fichier == "bin": # options de lecture pour struct.unpack ordre_bin = '>' if endian == 'big' else '<' fmt_entier = "l" if precision[:2] == 'i4' else 'q' taille_entier = int(precision[1]) fmt_reel = "f" if precision[-2:] == 'i4' else 'd' taille_reel = int(precision[3]) fichier_v3d.write(struct.pack(ordre_bin + fmt_entier, taille_entier)) fichier_v3d.write(struct.pack(ordre_bin + fmt_entier, nb_vars)) fichier_v3d.write(struct.pack(ordre_bin + fmt_entier, taille_entier)) incr_var = 0 for nom_var in dict_numpy_arrays if not type_maillage else ['x', 'y', 'z']: numpyArray = dict_numpy_arrays[nom_var] while len(dimensions) < 3: dimensions += (1,) numpyArray = numpyArray.ravel() if not type_maillage: var = ("va " + nom_var + 20 * " ")[:20] else: var = (nom_var + 20 * " ")[:20] fichier_v3d.write(struct.pack(ordre_bin + "l", 20)) fichier_v3d.write(struct.pack("20s", var)) fichier_v3d.write(struct.pack(ordre_bin + "l" ,20)) if type_maillage: fichier_v3d.write(struct.pack(ordre_bin + "l", 4 * taille_entier)) fichier_v3d.write(struct.pack(ordre_bin + "4" + fmt_entier, numbloc, dimensions[0], dimensions[1], dimensions[2]) ) fichier_v3d.write(struct.pack(ordre_bin + "l", 4 * taille_entier)) else: fichier_v3d.write(struct.pack(ordre_bin + "l", 5 * taille_entier)) fichier_v3d.write(struct.pack(ordre_bin + "5" + fmt_entier, incr_var + 1 , numbloc, dimensions[0], 
dimensions[1], dimensions[2]) ) fichier_v3d.write(struct.pack(ordre_bin + "l", 5 * taille_entier)) incr_var += 1 fichier_v3d.write(struct.pack(ordre_bin + "l", numpy.prod(dimensions) * taille_reel)) for value_point in numpyArray: fichier_v3d.write(struct.pack(ordre_bin + fmt_reel, value_point)) fichier_v3d.write(struct.pack(ordre_bin + "l", numpy.prod(dimensions) * taille_reel)) fichier_v3d.close() return 0 #______________________________________________________________________________________________________________________________ #_____________________________________________________________________________________ def modifier_triedre(input, nv_triedre): """manipulation du triedre (i, j, k) s'applique a un vtkStructuredGrid ou a un MultiBlockDataSet """ if isinstance(input, vtk.vtkMultiBlockDataSet): maillage_new = vtk.vtkMultiBlockDataSet() for numbloc in get_numeros_blocs_non_vides(input): bloc_new = modifier_triedre(input = input.GetBlock(numbloc), nv_triedre = nv_triedre) maillage_new.SetBlock(numbloc, bloc_new) return maillage_new output = vtk.vtkStructuredGrid() # changement des dimensions dims = input.GetDimensions() output.SetDimensions([dims[abs(nv_triedre[0]) - 1], dims[abs(nv_triedre[1]) - 1], dims[abs(nv_triedre[2]) - 1]]) # lecture des coordonnees des points coords = numpy_support.vtk_to_numpy(input.GetPoints().GetData()) coords = coords.reshape([dims[2], dims[1] , dims[0], 3]).transpose(2, 1, 0, 3) # modification du triedre new_coords = copy.deepcopy(numpy.ascontiguousarray(coords)) new_coords = new_coords[::numpy.sign(nv_triedre[0]), ::numpy.sign(nv_triedre[1]), ::numpy.sign(nv_triedre[2])] new_coords = new_coords.transpose(abs(nv_triedre[0]) - 1, abs(nv_triedre[1]) - 1, abs(nv_triedre[2]) - 1, 3) # remise dans l'ordre k,j,i pour stockage en vtk puis ravel new_coords = new_coords.transpose(2, 1, 0, 3).reshape(new_coords.size / 3, 3) # modification des points du inputs nv_points = vtk.vtkPoints() nv_points.SetData(numpy_support.numpy_to_vtk( numpy.ascontiguousarray(new_coords), deep = 1)) output.SetPoints(nv_points) output.Update() # il faut modifier aussi l'ordre des donnees aux points for numarray in range(input.GetPointData().GetNumberOfArrays()): nb_composantes = input.GetPointData().GetArray(numarray).GetNumberOfComponents() data = numpy_support.vtk_to_numpy(input.GetPointData().GetArray(numarray)).reshape( dims + (nb_composantes, )).transpose(2, 1, 0, nb_composantes) nv_data = numpy.array(data) nv_data = nv_data[::numpy.sign(nv_triedre[0]), ::numpy.sign(nv_triedre[1]), ::numpy.sign(nv_triedre[2])] nv_data = nv_data.transpose(abs(nv_triedre[0]) - 1, abs(nv_triedre[1]) - 1, abs(nv_triedre[2]) - 1, nb_composantes) # remise dans l'ordre k,j,i pour stockage en vtk puis ravel nv_data = nv_data.transpose(2, 1, 0, nb_composantes).reshape(nv_data.size / nb_composantes, nb_composantes) vtkArray = numpy_support.numpy_to_vtk(numpy.ascontiguousarray(nv_data), deep = 1) vtkArray.SetName(input.GetPointData().GetArrayName(numarray)) output.GetPointData().AddArray(vtkArray) # il faut modifier aussi l'ordre des donnees aux cellules for numarray in range(input.GetCellData().GetNumberOfArrays()): nb_composantes = input.GetCellData().GetArray(numarray).GetNumberOfComponents() data = numpy_support.vtk_to_numpy(input.GetCellData().GetArray(numarray)).reshape( dims[2] - 1, dims[1] - 1, dims[0] - 1, nb_composantes).transpose(2, 1, 0, nb_composantes) nv_data = numpy.array(data) nv_data = nv_data[::numpy.sign(nv_triedre[0]), ::numpy.sign(nv_triedre[1]), ::numpy.sign(nv_triedre[2])] nv_data = 
nv_data.transpose(abs(nv_triedre[0]) - 1, abs(nv_triedre[1]) - 1, abs(nv_triedre[2]) - 1, nb_composantes) # remise dans l'ordre k,j,i pour stockage en vtk puis ravel nv_data = nv_data.transpose(2, 1, 0, nb_composantes).reshape(nv_data.size / nb_composantes, nb_composantes) vtkArray = numpy_support.numpy_to_vtk(numpy.ascontiguousarray(nv_data), deep = 1) vtkArray.SetName(input.GetCellData().GetArrayName(numarray)) output.GetCellData().AddArray(vtkArray) print 'Modification du triedre faite : ', nv_triedre return output #_____________________________________________________________________________________ #_____________________________________________________________________________________ def get_variables_in_function(function): """retourne une liste contenant les variables necessaires au calcul de la fonction""" op_vect = ['grad', 'div', 'rot', 'lapl'] op_calc = ['(', ')', '+', '-', '*', '/', '.', '^', 'abs', 'acos', 'asin', 'atan', 'ceil', 'cos', 'cosh', 'exp', 'floor', 'log', 'max', 'min', 'mag', 'norm', 'sign', 'sin', 'sinh', 'sqrt', 'tan', 'tanh', 'iHat','jHat','kHat', 'arccos', 'arcsin', 'arctan'] liste_vars = [] read_g=0 read_d=1 while read_g < len(function): bloc = function[read_g:read_d] if bloc in op_calc or bloc.isdigit() or bloc == ' ' or bloc == '': read_g = read_d elif bloc in op_vect: i=0 nb_parentheses = 0 Loop = True while Loop: if function[read_d + i] == '(': nb_parentheses = nb_parentheses + 1 if function[read_d + i] == ')': nb_parentheses = nb_parentheses - 1 if nb_parentheses == 0: Loop = False ##liste_vars.append(function[read_d + 1:read_d+i]) liste_vars.append((bloc + '('+function[read_d + 1:read_d+i]+')').replace(' ', '')) #liste_vars.append((function[read_d + 1:read_d+i]).replace(' ', '')) read_g = read_d + i + 1 read_d = read_g i = i+1 elif bloc[-1] in op_calc: read_g = read_d liste_vars.append(bloc[:-1].replace(' ', '')) elif read_d == len(function): read_g = read_d liste_vars.append(bloc.replace(' ', '')) read_d = read_d + 1 if len(liste_vars) == 1 and liste_vars[0] == function: liste_vars[0] = liste_vars[0][liste_vars[0].find('(') + 1 : liste_vars[0].rfind(')')] liste_a_parcourir = copy.copy(liste_vars) for name in liste_a_parcourir: while liste_vars.count(name) > 1: liste_vars.remove(name) return liste_vars #_____________________________________________________________________________________ #_____________________________________________________________________________________ def get_noms_arrays_presents(input, loc='points'): """retourne la liste des array presents si multibloc, alors ne regarde que le premier bloc non vide si loc est different de points, alors regarde aux cellules """ if isinstance(input, vtk.vtkMultiBlockDataSet): names = get_noms_arrays_presents(input.GetBlock(get_numeros_blocs_non_vides(input)[0]), loc = loc) else: bloc = input names = [] for numarray in range(bloc.GetPointData().GetNumberOfArrays() if loc == 'points' else bloc.GetCellData().GetNumberOfArrays()): names.append(bloc.GetPointData().GetArray(numarray).GetName() if loc == 'points' else bloc.GetCellData().GetArray(numarray).GetName()) return names #_____________________________________________________________________________________ #_____________________________________________________________________________________ def renommer_arrays(input, dict_renommer, loc='points'): """renomme les arrays. dict_renommer est un dictionnaire {ancien_nom: nouveau_nom, ... 
} """ output = vtk_new_shallowcopy(input) if isinstance(input, vtk.vtkMultiBlockDataSet): for numbloc in get_numeros_blocs_non_vides(input): output.SetBlock(numbloc, renommer_arrays(input.GetBlock(numbloc), dict_renommer, loc)) else: data = input.GetPointData() if loc == 'points' else input.GetCellData() for old_name in dict_renommer: data.GetArray(old_name).SetName(dict_renommer[old_name]) return output #_____________________________________________________________________________________ #_____________________________________________________________________________________ def set_scalaires_actifs(input, array_name, loc = 'points'): """definit un champ de scalaires comme actif aux points ou cellules mono ou multi blocs """ output = vtk_new_shallowcopy(input) if not array_name in get_noms_arrays_presents(output, loc='points') \ + get_noms_arrays_presents(output, loc='cellules'): raise IOError, "output n'a pas d'array {0}".format(array_name) if isinstance(output, vtk.vtkMultiBlockDataSet): for numbloc in get_numeros_blocs_non_vides(output): output.SetBlock(numbloc, set_scalaires_actifs( input.GetBlock(numbloc), loc = loc, array_name = array_name)) else: output.GetPointData().SetActiveScalars(array_name) if loc == 'points' \ else output.GetCellData().SetActiveScalars(array_name) return output #_____________________________________________________________________________________ #_____________________________________________________________________________________ def VTKProbe(input, source, tolerance=None): """probe generique - mono/multi bloc par mono/multi bloc input contient la GEOMETRIE sur laquelle interpoler les donnees NOUVEAU source contient le maillage qui contient les donnees ANCIEN tolerance permet de faire des trucs. Essayer des valeurs, 0.1 par exemple. Voir la doc du vtkProbeFilter. """ balise = time.time() if isinstance(input, vtk.vtkMultiBlockDataSet): output = vtk.vtkMultiBlockDataSet() for numbloc in get_numeros_blocs_non_vides(input): print "probe par le bloc {0} de input".format(numbloc) output.SetBlock(numbloc, VTKProbe(input = input.GetBlock(numbloc), source = source, tolerance=tolerance)) elif isinstance(source, vtk.vtkMultiBlockDataSet): output = vtk_new_shallowcopy(input) dict_data = {} for numbloc in get_numeros_blocs_non_vides(source): print "probe du bloc {0} de la source".format(numbloc) bloc = VTKProbe(input, source.GetBlock(numbloc), tolerance=tolerance) for nom_array in get_noms_arrays_presents(bloc, loc = 'points'): array = get_vtk_array_as_numpy_array(bloc, nom_array) if dict_data.has_key(nom_array): dict_data[nom_array] += array else: dict_data[nom_array] = array # if len(dict_data.keys()) > 0: # argwhere = numpy.argwhere(dict_data['vtkValidPointMask'] == 0) # dict_data['vtkValidPointMask'][argwhere] = 1 # for nom_array in dict_data: # if nom_array != 'vtkValidPointMask': # if len(dict_data[nom_array].shape) == 2: # for k in range(dict_data[nom_array].shape[1]): # dict_data[nom_array][:, k] /= dict_data['vtkValidPointMask'] # else: # dict_data[nom_array] /= dict_data['vtkValidPointMask'] # # vtk_array = numpy_support.numpy_to_vtk(dict_data[nom_array], deep = 1) # vtk_array.SetName(nom_array) # output.GetPointData().AddArray(vtk_array) for nom_array in dict_data: vtk_array = numpy_support.numpy_to_vtk(dict_data[nom_array], deep = 1) vtk_array.SetName(nom_array) output.GetPointData().AddArray(vtk_array) else: #conversion en polydata -- VTKProbe ne fonctionne pas bien avec les StructuredGrid #et il ne respecte pas non plus la structure des objets... 
geom = vtk.vtkPolyData() geom.SetPoints(input.GetPoints()) # on cree output et on supprime les arrays qui sont presents dans input # pour eviter la confusion output = vtk_new_shallowcopy(input) for nom_array in get_noms_arrays_presents(input, 'points'): output.GetPointData().RemoveArray(nom_array) for nom_array in get_noms_arrays_presents(input, 'cellules'): output.GetCellData().RemoveArray(nom_array) #verification de l'intersection des boites de input et source bounds_geom = geom.GetBounds() bounds_source = source.GetBounds() if bounds_geom[0] > bounds_source[1] \ or bounds_geom[1] < bounds_source[0] \ or bounds_geom[2] > bounds_source[3] \ or bounds_geom[3] < bounds_source[2] \ or bounds_geom[4] > bounds_source[5] \ or bounds_geom[5] < bounds_source[4]: return output filtre = vtk.vtkProbeFilter() vtk_set_input(filtre, geom) try: filtre.SetSource(source) except: filtre.SetSourceData(source) if tolerance is not None: filtre.ComputeToleranceOff() filtre.SetTolerance(tolerance) print 'TOLERANCE pour vtkProbeFilter reglee manuellement a {0}'.format(tolerance) filtre.Update() #on ajoute ce qu'on a probe a output for nom_array in get_noms_arrays_presents(filtre.GetOutput()): #print nom_array output.GetPointData().AddArray( filtre.GetOutput().GetPointData().GetArray(nom_array)) if numpy.max(get_vtk_array_as_numpy_array(output, 'vtkValidPointMask')) > 1: print 'ATTENTION ATTENTION ATTENTION : verifier le resultat. La sonde intersecte plusieurs domaines.' print '[PERFORMANCE] Operation de VTKProbe faite en {0} secondes'.format(time.time() - balise) return output #_____________________________________________________________________________________ ##_____________________________________________________________________________________ #def interpoler_avec_scipy(source, input, \ #methode_interpolation='nearest', fill_value=0): #"""fonction d'interpolation #- d'un multiblock sur un autre, les blocs etant confondus #- d'un bloc sur un autre #utilise la routine scipy.interpolate.griddata #- input contient le nouveau maillage #- source contient l'ancien maillage, avec les donnees #""" #output = vtk_new_shallowcopy(input) #if isinstance(output, vtk.vtkMultiBlockDataSet): #for numbloc in get_numeros_blocs_non_vides(output): #print "interpolation bloc ", numbloc #output.SetBlock(numbloc, #interpoler_avec_scipy(source = source.GetBlock(numbloc), input = output.GetBlock(numbloc)) #) #else: ## interpolation des donnees aux noeuds #coords_source = numpy_support.vtk_to_numpy(source.GetPoints().GetData()) #coords_input = numpy_support.vtk_to_numpy(output.GetPoints().GetData()) #for numarray in range(source.GetPointData().GetNumberOfArrays()): #nom_array = source.GetPointData().GetArrayName(numarray) #print 'array ', nom_array #z_source = numpy_support.vtk_to_numpy(source.GetPointData().GetArray(numarray)) #z_input = interpolate.griddata( #coords_source, z_source, #coords_input, #method = 'nearest', #fill_value = 0 #) #z_input = numpy_support.numpy_to_vtk(z_input, deep = 1) #z_input.SetName(nom_array) #output.GetPointData().AddArray(z_input) ## interpolation des donnees aux cellules #f = vtk.vtkCellCenters() #f.SetInputData(source) #f.Update() #coords_cellules_source = numpy_support.vtk_to_numpy(f.GetOutput().GetPoints().GetData()) #f = vtk.vtkCellCenters() #f.SetInputData(output) #f.Update() #coords_cellules_input = numpy_support.vtk_to_numpy(f.GetOutput().GetPoints().GetData()) #for numarray in range(source.GetCellData().GetNumberOfArrays()): #nom_array = source.GetCellData().GetArrayName(numarray) #print 
'array ', nom_array #z_source = numpy_support.vtk_to_numpy(source.GetCellData().GetArray(numarray)) #z_input = interpolate.griddata( #coords_cellules_source, z_source, #coords_cellules_input, #method = methode_interpolation, #fill_value = fill_value #) #z_input = numpy_support.numpy_to_vtk(z_input, deep = 1) #z_input.SetName(nom_array) #output.GetCellData().AddArray(z_input) #return output ##_____________________________________________________________________________________ #_____________________________________________________________________________________ def interpoler_avec_scipy(source, input, \ methode_interpolation='nearest', fill_value=0): """fonction d'interpolation -- nouvelle version qui pourrait (?) gerer mieux les multibloc - d'un multiblock sur un autre - d'un bloc sur un autre utilise la routine scipy.interpolate.griddata - source = ANCIEN contient l'ancien maillage, avec les donnees - input = NOUVEAU contient le nouveau maillage """ output = vtk_new_shallowcopy(input) if isinstance(output, vtk.vtkMultiBlockDataSet): for numbloc in get_numeros_blocs_non_vides(output): print "interpolation bloc ", numbloc output.SetBlock(numbloc, interpoler_avec_scipy(source = source, input = output.GetBlock(numbloc)) ) else: #a partir de la, output est un bloc et source peut etre un bloc ou un multibloc # interpolation des donnees aux noeuds coords_source = get_vtk_array_as_numpy_array(source, 'coords') coords_input = get_vtk_array_as_numpy_array(output, 'coords') for nom_array in get_noms_arrays_presents(source, loc='points'): print 'array ', nom_array z_source = get_vtk_array_as_numpy_array(source, nom_array) z_input = interpolate.griddata( coords_source, z_source, coords_input, method = methode_interpolation, fill_value = 0 ) z_input = numpy_support.numpy_to_vtk(z_input, deep = 1) z_input.SetName(nom_array) output.GetPointData().AddArray(z_input) # interpolation des donnees aux cellules f = vtkFiltersGeneral.vtkCellCenters() if isinstance(source, vtk.vtkMultiBlockDataSet): source_cells = appliquer_sur_multibloc(f, source) else: vtk_set_input(f, source) f.Update() source_cells = f.GetOutput() coords_cellules_source = get_vtk_array_as_numpy_array(source_cells, 'coords') f = vtkFiltersGeneral.vtkCellCenters() vtk_set_input(f, output) f.Update() coords_cellules_input = get_vtk_array_as_numpy_array(f.GetOutput(), 'coords') for nom_array in get_noms_arrays_presents(source, loc = 'cellules'): print 'array ', nom_array z_source = get_vtk_array_as_numpy_array(source, nom_array) z_input = interpolate.griddata( coords_cellules_source, z_source, coords_cellules_input, method = methode_interpolation, fill_value = fill_value ) z_input = numpy_support.numpy_to_vtk(z_input, deep = 1) z_input.SetName(nom_array) output.GetCellData().AddArray(z_input) return output #_____________________________________________________________________________________ #_____________________________________________________________________________________ def get_cell_centers(input): """Utilise le filtre vtkCellCenters pour retourner les coordonnees des centres des cellules sous forme de numpy array vtkCellCenters is a filter that takes as input any dataset and generates on output points at the center of the cells in the dataset. 
""" f = vtkFiltersGeneral.vtkCellCenters() vtk_set_input(f, input) f.Update() mesh_centers = f.GetOutput() return get_vtk_array_as_numpy_array(mesh_centers, 'coords') #_____________________________________________________________________________________ #_____________________________________________________________________________________ def lire_fichier_tecplot(acces_fichier, sep=" "): """fonction de lecture d'un fichier au format tecplot ASCII """ f = file(acces_fichier, 'r') f.readline() read = f.readline() read = read.split('"') #liste des variables contenues dans les donnees variables = [read[2*i+1] for i in range(int(len(read)/2))] #lecture des dimensions ligne = f.readline() dims = [] for cle in ['I=', 'J=', 'K=']: if cle in ligne: dims.append(int(ligne.split(cle)[1].split(',')[0])) else: dims.append(1) data_out = {} data_out['dims'] = dims #lecture des donnees read = f.read() read = read.replace('\n', sep).split(sep) while '' in read: read.remove('') #read contient toutes les valeurs dans l'ordre, sous forme de string #Separation des variables NITER = len(read)/len(variables) data_out['data'] = {} for var in range(0, len(variables)): data_out['data'][variables[var]] = [float(read[i]) for i in range(NITER*var, NITER*(var+1))] return data_out #_____________________________________________________________________________________ #_____________________________________________________________________________________ def get_vtk_array_as_numpy_array(input, nom_array, copy=False, loc=None): """fonction qui retourne le array du vtkDataObject donne en input sous forme d'un numpy array en utilisant numpy_support la donnee peut etre indiferemment aux cellules ou aux points si input est un vtkMultiBlockDataSet, alors les donnees sont concatenees dans l'ordre des blocs croissants nom_array peut etre coords, pour avoir les coordonnees des points nom_array peut etre centers, pour avoir les coordonnees des centres des cellules nom_array peut etre polys pour avoir l'arbre de connectivite (pour un vtkPolyData uniquement) Pour un maillage non-structure : - nom_array peut etre cells - nom_array peut etre cellstypes - nom_array peut etre cellslocations Si loc est None, alors la donnee est recherchee en priorite aux points, puis aux cellules si elle n'a pas ete trouvee aux points. 
Si loc est donnee, alors on ne cherche que aux points ou au cellules loc = 'points' pour chercher la donnee aux points uniquement loc = <autre chose> pour chercher la donnee aux cellules uniquement """ # cas multibloc if isinstance(input, vtk.vtkMultiBlockDataSet): output = None for numbloc in get_numeros_blocs_non_vides(input): if output is None: output = get_vtk_array_as_numpy_array(input.GetBlock(numbloc), nom_array) elif nom_array == 'polys': raise IOError, 'pas disponible pour le cas multibloc' elif nom_array == 'cells': raise IOError, 'pas disponible pour le cas multibloc' elif nom_array == 'cellstypes': raise IOError, 'pas disponible pour le cas multibloc' elif nom_array == 'cellslocations': raise IOError, 'pas disponible pour le cas multibloc' else: output = numpy.concatenate(( output, get_vtk_array_as_numpy_array(input.GetBlock(numbloc), nom_array)), axis = 0) # cas monobloc else: if nom_array == 'coords': output = numpy_support.vtk_to_numpy(input.GetPoints().GetData()) elif nom_array == 'centers': output = get_cell_centers(input) elif nom_array == 'polys': output = numpy_support.vtk_to_numpy(input.GetPolys().GetData()) # if output.size % 4 == 0: # output = output.reshape(output.size / 4, 4)[:, 1:] # elif output.size % 5 == 0: # output = output.reshape(output.size / 5, 5)[:, 1:] # else: # raise IOError, "type de cellule non reconnu" elif nom_array == 'cells': output = numpy_support.vtk_to_numpy(input.GetCells().GetData()) elif nom_array == 'cellstypes': output = numpy_support.vtk_to_numpy(input.GetCellTypesArray()) elif nom_array == 'cellslocations': output = numpy_support.vtk_to_numpy(input.GetCellLocationsArray()) else: if loc == None: array_aux_points = input.GetPointData().HasArray(nom_array) array_aux_cellules = input.GetCellData().HasArray(nom_array) if array_aux_points and array_aux_cellules: raise Exception, "conflit : {0} est present aux noeuds ET aux cellules".format(nom_array) elif not array_aux_points and not array_aux_cellules: raise Exception, "erreur : {0} n'est present NI aux noeuds NI aux cellules".format(nom_array) if array_aux_points: print 'La donnee est aux POINTS' loc_data = input.GetPointData() else: print 'La donnee est aux CELLULES' loc_data = input.GetCellData() else: loc_data = input.GetPointData() if loc == 'points' else input.GetCellData() output = numpy_support.vtk_to_numpy(loc_data.GetArray(nom_array)) if copy == True: output = numpy.array(output) return output #_____________________________________________________________________________________ #_____________________________________________________________________________________ def calculer_surfaces_cellules(input, retourner_surface_totale=False): """fonction qui ajoute aux cellules la valeur de l'aire de chacune des cellules mono/multi bloc(s) s'appuie sur la classe vtk.vtkCellQuality """ if isinstance(input, vtk.vtkMultiBlockDataSet): output = vtk.vtkMultiBlockDataSet() aire_surface = 0 for numbloc in get_numeros_blocs_non_vides(input): bloc, aire_surface_bloc = calculer_surfaces_cellules(input.GetBlock(numbloc), retourner_surface_totale = True) output.SetBlock(numbloc, bloc) aire_surface += aire_surface_bloc else: if isinstance(input, vtk.vtkStructuredGrid): dimensions = list(input.GetDimensions()) if dimensions.count(1) != 1: raise IOError, "le vtkStructuredGrid en entree n'est pas une surface" f = vtkFiltersVerdict.vtkMeshQuality() vtk_set_input(f, input) f.SetTriangleQualityMeasureToArea() f.SetQuadQualityMeasureToArea() f.SetTetQualityMeasureToVolume() f.SetHexQualityMeasureToVolume() 
f.Update() output = f.GetOutput() output.GetCellData().GetArray('Quality').SetName('CellSurface') aire_surface = numpy.sum( get_vtk_array_as_numpy_array(output, 'CellSurface')) if retourner_surface_totale == True: return output, aire_surface else: return output #_____________________________________________________________________________________ #_____________________________________________________________________________________ def integrer_sur_la_surface(input, array_a_integrer, array_poids = 'CellSurface', remplacer_nan=False): """fonction qui integre in array sur une surface. Aux cellules ou aux noeuds. mono/multi bloc(s) possiblite d'indiquer 1 pour array_a_integrer Si remplacer_nan est vrai, alors les nan sont remplaces par zero array_a_integrer et array_poids doivent deja etre presents aux cellules ou aux noeuds array aux points et poids aux cellules --> on interpole array aux cellules array aux cellules et poids aux points --> on interpole poids aux cellules """ for array in [array_a_integrer, array_poids]: if not(array in get_noms_arrays_presents(input, loc = 'cellules') or array in get_noms_arrays_presents(input, loc = 'points')) \ and array != 1: raise IOError, '{0} present ni aux noeuds ni aux cellules'.format(array) if isinstance(input, vtk.vtkMultiBlockDataSet): resultat_integration = 0 for numbloc in get_numeros_blocs_non_vides(input): resultat_integration_bloc = integrer_sur_la_surface(input.GetBlock(numbloc), array_a_integrer, array_poids) resultat_integration += resultat_integration_bloc else: #si array_a_integrer est egal a 1 if array_a_integrer == 1: array_poids = get_vtk_array_as_numpy_array(input, array_poids) array_a_integrer = numpy.ones(shape=array_poids.shape) #les deux sont situes au meme endroit elif array_a_integrer in get_noms_arrays_presents(input, 'points') and \ array_poids in get_noms_arrays_presents(input, 'points') or \ array_a_integrer in get_noms_arrays_presents(input, 'cellules') and \ array_poids in get_noms_arrays_presents(input, 'cellules'): array_a_integrer = get_vtk_array_as_numpy_array(input, array_a_integrer) array_poids = get_vtk_array_as_numpy_array(input, array_poids) #array aux points et poids aux cellules --> on interpole array aux cellules elif array_a_integrer in get_noms_arrays_presents(input, 'points') and \ array_poids in get_noms_arrays_presents(input, 'cellules'): array_poids = get_vtk_array_as_numpy_array(input, array_poids) f = vtk.vtkPointDataToCellData() #f.SetInputData(input) vtk_set_input(f, input) f.Update() array_a_integrer = get_vtk_array_as_numpy_array(f.GetOutput(), array_a_integrer) #array aux cellules et poids aux points --> on interpole poids aux cellules elif array_a_integrer in get_noms_arrays_presents(input, 'cellules') and \ array_poids in get_noms_arrays_presents(input, 'points'): array_poids = get_vtk_array_as_numpy_array(input, array_poids) f = vtk.vtkCellDataToPointData() #f.SetInputData(input) vtk_set_input(f, input) f.Update() array_a_integrer = get_vtk_array_as_numpy_array(f.GetOutput(), array_a_integrer) if remplacer_nan: print 'Remplacement dans array_a_integrer de {0} NaN'.format( numpy.where(numpy.isnan(array_a_integrer))[0].size) array_a_integrer = numpy.nan_to_num(array_a_integrer) print 'Remplacement dans array_poids de {0} NaN'.format( numpy.where(numpy.isnan(array_poids))[0].size) array_poids = numpy.nan_to_num(array_poids) resultat_integration = numpy.sum(array_a_integrer * array_poids) return resultat_integration 
#_____________________________________________________________________________________ #_____________________________________________________________________________________ def interpoler_cellules_aux_points(input): """interpolation CellDataToPointData """ f = vtk.vtkCellDataToPointData() if isinstance(input, vtk.vtkMultiBlockDataSet): output = appliquer_sur_multibloc(f, input) else: vtk_set_input(f, input) f.Update() output = f.GetOutput() return output #_____________________________________________________________________________________ #_____________________________________________________________________________________ def interpoler_points_aux_cellules(input): """interpolation CellDataToPointData """ f = vtk.vtkPointDataToCellData() if isinstance(input, vtk.vtkMultiBlockDataSet): output = appliquer_sur_multibloc(f, input) else: vtk_set_input(f, input) f.Update() output = f.GetOutput() return output #_____________________________________________________________________________________ #_____________________________________________________________________________________ def ajouter_suffixe_aux_arrays(input, suffixe): """ajoute un suffixe a tous les arrays de input sert quand on veut resampler deux dataset ensemble par exemple """ if isinstance(input, vtk.vtkMultiBlockDataSet): output = vtk.vtkMultiBlockDataSet() for numbloc in get_numeros_blocs_non_vides(input): output.SetBlock(numbloc, ajouter_suffixe_aux_arrays(input.GetBlock(numbloc), suffixe) ) else: output = vtk_new_shallowcopy(input) for nom_array in get_noms_arrays_presents(output, 'points'): array = numpy_support.vtk_to_numpy(output.GetPointData().GetArray(nom_array)) varray = numpy_support.numpy_to_vtk(array, deep = 1) varray.SetName(nom_array + suffixe) output.GetPointData().AddArray(varray) output.GetPointData().RemoveArray(nom_array) for nom_array in get_noms_arrays_presents(output, 'cellules'): array = get_vtk_array_as_numpy_array(output.GetCellData().GetArray(nom_array)) varray = numpy_support.numpy_to_vtk(array, deep = 1) varray.SetName(nom_array + suffixe) output.GetCellData().AddArray(varray) output.GetCellData().RemoveArray(nom_array) return output #_____________________________________________________________________________________ #_____________________________________________________________________________________ def Rotation(input, alpha, axe=2): """ancien appel, avec majsucule -- pour compatibilite seulement. 
NE PAS UTILISER """ return rotation(input, alpha, axe) #_____________________________________________________________________________________ #_____________________________________________________________________________________ def rotation(input, alpha, axe=2): """ Rotation d'un input autour de l'axe - alpha en DEGRES L'axe est indique par son numero 0 pour x, 1 pour y, 2 pour z Utile principalement pour dupliquer des canaux fait appel a vtkTransform et a vtkArrayCalculator pour faire tourner les vecteurs (arrays a trois composantes) """ if isinstance(input, vtk.vtkMultiBlockDataSet): output = vtk_new_instance(input) for numbloc in get_numeros_blocs_non_vides(input): output.SetBlock(numbloc, rotation(input.GetBlock(numbloc), alpha, axe=axe) ) return output else: #pour eviter que vtkTransform ne tourne un vecteur au points #VTK ne permet apparait que l'ajout d'un seul vecteur aux points from time import time t1 = time() input.GetPointData().SetActiveVectors(None) transform = vtkCommonTransforms.vtkTransform() if axe == 0: print 'rotation x' transform.RotateX(alpha) elif axe == 1: print 'rotation y' transform.RotateY(alpha) elif axe == 2: print 'rotation z' transform.RotateZ(alpha) transformFilter = vtkFiltersGeneral.vtkTransformFilter() #transformFilter.SetInputData(input) vtk_set_input(transformFilter, input) transformFilter.SetTransform(transform) transformFilter.Update() output = transformFilter.GetOutput() # rotation des vecteurs aux points NomsVecteurs = [] for i in range(input.GetPointData().GetNumberOfArrays()): if input.GetPointData().GetArray(i).GetNumberOfComponents() == 3: NomsVecteurs.append(input.GetPointData().GetArrayName(i)) for vect in NomsVecteurs: print 'rotation du vecteur ', vect narray = get_vtk_array_as_numpy_array(output, vect, loc='points') narray_new = numpy.ones(narray.shape) alpha_rad = numpy.deg2rad(alpha) if axe == 0: narray_new[:, 0] = (narray[:, 0]) narray_new[:, 1] = ((narray[:, 1]) * numpy.cos(alpha_rad)-(narray[:, 2]) * numpy.sin(alpha_rad)) narray_new[:, 2] = ((narray[:, 2]) * numpy.cos(alpha_rad) + (narray[:, 1]) * numpy.sin(alpha_rad)) elif axe == 1: narray_new[:, 1] = (narray[:, 1]) narray_new[:, 2] = ((narray[:, 2]) * numpy.cos(alpha_rad)-(narray[:, 0]) * numpy.sin(alpha_rad)) narray_new[:, 0] = ((narray[:, 0]) * numpy.cos(alpha_rad) + (narray[:, 2]) * numpy.sin(alpha_rad)) elif axe == 2: narray_new[:, 2] = (narray[:, 2]) narray_new[:, 0] = ((narray[:, 0]) * numpy.cos(alpha_rad)-(narray[:, 1]) * numpy.sin(alpha_rad)) narray_new[:, 1] = ((narray[:, 1]) * numpy.cos(alpha_rad) + (narray[:, 0]) * numpy.sin(alpha_rad)) else: raise IOError, "gni -- moi pas comprendre l'axe de rotation" output = ajouter_numpy_array_as_vtk_array(output, narray_new, vect) # rotation des vecteurs aux cellules NomsVecteurs = [] for i in range(input.GetCellData().GetNumberOfArrays()): if input.GetCellData().GetArray(i).GetNumberOfComponents() == 3: NomsVecteurs.append(input.GetCellData().GetArrayName(i)) for vect in NomsVecteurs: print 'rotation du vecteur ', vect narray = get_vtk_array_as_numpy_array(output, vect, loc='cells') narray_new = numpy.ones(narray.shape) alpha_rad = numpy.deg2rad(alpha) if axe == 0: narray_new[:, 0] = (narray[:, 0]) narray_new[:, 1] = ((narray[:, 1]) * numpy.cos(alpha_rad)-(narray[:, 2]) * numpy.sin(alpha_rad)) narray_new[:, 2] = ((narray[:, 2]) * numpy.cos(alpha_rad) + (narray[:, 1]) * numpy.sin(alpha_rad)) elif axe == 1: narray_new[:, 1] = (narray[:, 1]) narray_new[:, 2] = ((narray[:, 2]) * numpy.cos(alpha_rad)-(narray[:, 0]) * 
numpy.sin(alpha_rad)) narray_new[:, 0] = ((narray[:, 0]) * numpy.cos(alpha_rad) + (narray[:, 2]) * numpy.sin(alpha_rad)) elif axe == 2: narray_new[:, 2] = (narray[:, 2]) narray_new[:, 0] = ((narray[:, 0]) * numpy.cos(alpha_rad)-(narray[:, 1]) * numpy.sin(alpha_rad)) narray_new[:, 1] = ((narray[:, 1]) * numpy.cos(alpha_rad) + (narray[:, 0]) * numpy.sin(alpha_rad)) else: raise IOError, "gni -- moi pas comprendre l'axe de rotation" output = ajouter_numpy_array_as_vtk_array(output, narray_new, vect) return output #_____________________________________________________________________________________ #_____________________________________________________________________________________ def redimensionner(input, coefficient): """ redimensionne un maillage, sans toucher aux donnees aux points typiquement pour passer de mm en metres ou inversement """ if isinstance(input, vtk.vtkMultiBlockDataSet): output = vtk_new_instance(input) for numbloc in get_numeros_blocs_non_vides(input): output.SetBlock(numbloc, redimensionner(input.GetBlock(numbloc), coefficient) ) return output else: #pour eviter que vtkTransform ne tourne un vecteur au points #VTK ne permet apparait que l'ajout d'un seul vecteur aux points input.GetPointData().SetActiveVectors(None) transform = vtkCommonTransforms.vtkTransform() transform.Scale(coefficient, coefficient, coefficient) transformFilter = vtkFiltersGeneral.vtkTransformFilter() #transformFilter.SetInputData(input) vtk_set_input(transformFilter, input) transformFilter.SetTransform(transform) transformFilter.Update() output = transformFilter.GetOutput() return output #_____________________________________________________________________________________ #_____________________________________________________________________________________ def ecrire_fichier_colonnes(acces_fichier=None, dictionnaire_donnees=None, ecrasement=False): """ fonction d'ecriture d'un fichier formatte contenant des donnees en colonnes la premiere ligne contient les noms des variables, les lignes suivantes contiennent les donnees les donnees contenues dans le dictionnaire peuvent ne pas contenir le meme nombre de donnees """ # serie de tests prealables a l'ecriture du fichier if acces_fichier is None: raise IOError, "acces_fichier n'est pas renseigne" if dictionnaire_donnees is None: raise IOError, "dictionnaire_donnees n'est pas renseigne" if os.path.exists(acces_fichier) and ecrasement is False: raise IOError, "un fichier existe deja a l'emplacement indique" # ecriture du fichier print "Ecriture ", acces_fichier fichier = file(acces_fichier, "w") # en-tetes nombre_de_cles = len(dictionnaire_donnees.keys()) liste_cles = dictionnaire_donnees.keys() for numero_cle in range(nombre_de_cles): fichier.write(liste_cles[numero_cle]) fichier.write('\n' if numero_cle == nombre_de_cles - 1 else '\t') # ecriture des donnees index = 0 while True: ligne = '' compteur = 0 for cle in liste_cles: if not isinstance(dictionnaire_donnees[cle], list): valeur = [dictionnaire_donnees[cle]] else: valeur = dictionnaire_donnees[cle] if index < len(valeur): compteur += 1 ligne += str(valeur[index]) ligne += "\n" if cle == liste_cles[-1] else '\t' if compteur != 0: fichier.write(ligne) index += 1 else: break fichier.close() #_________________________________________________________________ #_____________________________________________________________________________________ def lire_fichier_colonnes(acces_fichier=None, dtype=str, sep="\t"): """ fonction de lecture d'un fichier formatte contenant des donnees en colonnes la 
premiere ligne contient les noms des variables les lignes suivantes contiennent les donnees colonnes separees par une tabulation. """ # serie de tests prealables a l'ecriture du fichier if acces_fichier is None: raise IOError, "acces_fichier n'est pas renseigne" if os.path.exists(acces_fichier) is False: raise IOError, "pas de fichier a l'emplacement indique" # ecriture du fichier fichier = file(acces_fichier, "r") bloc = fichier.read() bloc = bloc.split('\n') cles = bloc[0] cles = bloc[0].split(sep) data = dict.fromkeys(cles) for key in data: data[key] = [] for ligne in bloc[1:]: ligne = ligne.split(sep) for num in range(len(ligne)): valeur = ligne[num] cle = cles[num] if valeur != '': data[cle].append(valeur) for cle in data: data[cle] = numpy.array(data[cle], dtype = dtype) return data #_____________________________________________________________________________________ #__________________________________________________________________________________________ def calculer_vecteur_normal(input, normals_aux_cellules=False): """fonction qui calcul le vecteur normal normals_aux_cellules permet d'indiquer si le vecteur normal doit etre calcule aux centres des faces ou au noeuds Dans le cas d'un StructuredGrid, la fonction de calcul de la normale est fait maison Dans le cas d'un PolyData, on utilise le filtre VTKPolyDataNormals """ #on commence par shallow copier pour ne pas modifier l'objet input data = vtk_new_shallowcopy(input) #le cas structure : il est ecrit a la main parce qu'il n'y a pas de filtre #directement disponible dans vtk pour calculer le vecteur normal en structure #une autre solution aurait pu consister a transformer le bloc structure en polydata #puis a utiliser vtkPolyDataNormals, mais il ne semble pas alors qu'il soit possible #de faire les calculs en double precision if isinstance(data, vtk.vtkStructuredGrid): coords = get_vtk_array_as_numpy_array(data, "coords") #lecture shape_bloc et inversion car stockage k, j, i shape_bloc = numpy.asarray(data.GetDimensions())[::-1] if not 1 in shape_bloc: raise IOError, "le StructuredGrid n'est pas une surface" shape_frontiere = numpy.take( shape_bloc, numpy.argwhere(shape_bloc !=1) ).ravel() coords = coords.reshape(tuple(shape_frontiere) + (3,)) #Calcul du vecteur normal aux cellules if normals_aux_cellules: #calcul des diagonales vec1 = (coords[1:, 1:] - coords[:-1, :-1]) vec2 = (coords[1:, :-1] - coords[:-1 ,1:]) #produit vectoriel normals = numpy.cross(vec1, vec2, axis = -1) normals = normals.reshape(data.GetNumberOfCells(), 3) normals /= numpy.apply_along_axis(numpy.linalg.norm, -1, normals)[:, None] Normals = numpy_support.numpy_to_vtk(normals, deep=1) Normals.SetName("Normals") data.GetCellData().AddArray(Normals) #Calcul du vecteur normal aux points elif normals_aux_cellules == False: #___________________________________________________________________________________ def traitement_frontiere(coords_front): """fonction qui calcule le vecteur normal sur une frontiere bien specifique en entree, la liste des coordonnees des points, comme si c'etait une frontiere jmin coords_front shape doit donc etre (2, nb_points, 3) Elle sera appelee ensuite de facon a simplifier le code """ normals_frontiere = numpy.zeros((coords_front.shape[1], 3)) normals_frontiere[:-1] = numpy.cross( coords_front[0, 1:] - coords_front[0, :-1], coords_front[1, :-1] - coords_front[0, :-1], axis = -1) normals_frontiere[-1] = numpy.cross( coords_front[1, -1] - coords_front[0, -1], coords_front[0, -2] - coords_front[0, -1], axis = -1) normals_frontiere /= 
numpy.linalg.norm(normals_frontiere) return normals_frontiere #___________________________________________________________________________________ #calcul des diagonales pour les points au coeur du maillage vec1 = (coords[2:, 2:] - coords[:-2, :-2]) vec1 = vec1 / numpy.linalg.norm(vec1) vec2 = (coords[2:, :-2] - coords[:-2 ,2:]) vec2 = vec2 / numpy.linalg.norm(vec2) #produit vectoriel normals = numpy.zeros(tuple(shape_frontiere) + (3,)) normals[1:-1, 1:-1] = numpy.cross(vec1, vec2, axis = -1) #frontiere jmin normals[0, :] = traitement_frontiere(coords[:2, :]) #frontiere jmax normals[-1, :] = traitement_frontiere(coords[-2:, :][::-1, ::-1])[::-1] #frontiere imin - sans recalcul de normals au coin normals[1:-1, 0] = traitement_frontiere(coords[1:-1, :2].transpose(1, 0, 2)[:,::-1])[::-1] #frontiere imax normals[1:-1, -1] = traitement_frontiere(coords[1:-1, -2:].transpose(1, 0, 2)[::-1]) #normalisation normals = normals.reshape(data.GetNumberOfPoints(), 3) normals /= numpy.apply_along_axis(numpy.linalg.norm, -1, normals)[:, None] Normals = numpy_support.numpy_to_vtk(normals, deep = 1) Normals.SetName("Normals") data.GetPointData().AddArray(Normals) else: raise IOError, normals_aux_cellules + "... C'est quoi ce bidule ? " #cas d'un polydata : on reutilise simplement le filtre vtkPolyDataNormals elif isinstance(input, vtk.vtkPolyData): normals = vtk.vtkPolyDataNormals() normals.SetComputeCellNormals(normals_aux_cellules) normals.SetComputePointNormals(not normals_aux_cellules) #normals.SetInputData(data) vtk_set_input(normals, data) normals.Update() if normals_aux_cellules == False: data.GetPointData().AddArray( normals.GetOutput().GetPointData().GetArray("Normals")) else: data.GetCellData().AddArray( normals.GetOutput().GetCellData().GetArray("Normals")) #cas d'un multibloc : appel recursif elif isinstance(input, vtk.vtkMultiBlockDataSet): for numbloc in get_numeros_blocs_non_vides(data): data.SetBlock(numbloc, calculer_vecteur_normal(data.GetBlock(numbloc), normals_aux_cellules) ) else: raise IOError return data #__________________________________________________________________________________________ #____________________________________________________________________________ def decaler_paroi(paroi, decalage): """Fonction qui decale une surface selon le vecteur normal la surface doit avoir un vecteur normal aux points la surface doit etre un polydata normals doit etre present """ # cas multibloc if isinstance(paroi, vtk.vtkMultiBlockDataSet): output = vtk_new_instance(paroi) for numbloc in get_numeros_blocs_non_vides(paroi): output.SetBlock(numbloc, decaler_paroi(paroi.GetBlock(numbloc), decalage)) return output # cas monobloc polydata # paroi.GetPointData().SetActiveVectors('Normals') # for nom_array in get_noms_arrays_presents(paroi, 'points'): # if nom_array != 'Normals': # paroi.GetPointData().RemoveArray(nom_array) # for nom_array in get_noms_arrays_presents(paroi, 'cellules'): # paroi.GetCellData().RemoveArray(nom_array) #liste_noms_arrays = get_noms_arrays_presents(data, loc = 'points') #dict_data = dict.fromkeys(liste_noms_arrays) #f = vtkFiltersGeometry.vtkGeometryFilter() ##f.SetInputData(paroi) #vtk_set_input(f, paroi) #f.Update() #paroi = f.GetOutput() # warp = vtkFiltersGeneral.vtkWarpVector() #warp.SetInputData(paroi) # vtk_set_input(warp, paroi) # warp.SetScaleFactor(decalage) # warp.Update() # warp = warp.GetOutput() coords = get_vtk_array_as_numpy_array(paroi, 'coords') normals = get_vtk_array_as_numpy_array(paroi, 'Normals') new_coords = coords + decalage * normals 
vtk_array = numpy_support.numpy_to_vtk(new_coords, deep=1) point = vtk.vtkPoints() point.SetData(vtk_array) output = vtk_new_shallowcopy(paroi) output.SetPoints(point) return output #____________________________________________________________________________ #____________________________________________________________________________ def appliquer_sur_multibloc(filtre_vtk, multibloc): """applique un filtre VTK de maniere iterative sur un multibloc le filtre_vtk doit etre deja completement configure, sauf Input """ output = vtk.vtkMultiBlockDataSet() for numbloc in get_numeros_blocs_non_vides(multibloc): vtk_set_input(filtre_vtk, multibloc.GetBlock(numbloc)) filtre_vtk.Update() output.SetBlock(numbloc, vtk_new_shallowcopy(filtre_vtk.GetOutput())) return output #____________________________________________________________________________ #__________________________________________________________________________________________ def convertir_en_polydata(input, calculer_vecteur_normal=True): """fonction de conversion en polydata avec fusion des blocs si input est un multiblock utilise les filtres vtkAppendPolyData et vtkGeometryFilter """ # si input est un multiblock, il faut les regrouper en un seul polydata if isinstance(input, vtk.vtkMultiBlockDataSet): appendFilter = vtk.vtkAppendPolyData() for numbloc in get_numeros_blocs_non_vides(input): # un test pour s'adapter aux differentes versions de VTK -- changement de noms des methodes try: appendFilter.AddInput(convertir_en_polydata(input.GetBlock(numbloc))) except: appendFilter.AddInputData(convertir_en_polydata(input.GetBlock(numbloc))) appendFilter.Update() input = appendFilter.GetOutput() # si input n'est pas un multiblock, il faut simplemement le convertir en vtkPolyData elif not isinstance(input, vtk.vtkPolyData): conversion = vtkFiltersGeometry.vtkGeometryFilter() #conversion.SetInputData(input) vtk_set_input(conversion, input) conversion.Update() input = conversion.GetOutput() if calculer_vecteur_normal == True: normals = vtk.vtkPolyDataNormals() vtk_set_input(normals, input) normals.Update() input = normals.GetOutput() return input #__________________________________________________________________________________________ #__________________________________________________________________________________________ def merge_blocs(liste_blocs, merge_points=True): """fonction qui permet de merger des blocs ensemble """ m = vtk.vtkMultiBlockDataSet() for k in range(len(liste_blocs)): m.SetBlock(k, liste_blocs[k]) output = merge_multibloc(m) return output #__________________________________________________________________________________________ #__________________________________________________________________________________________ def merge_multibloc(multibloc, merge_points=True): """fonction de conversion d'une multibloc en polydata avec fusion des blocs si multibloc est un multiblock """ appendFilter = vtk.vtkAppendFilter() if merge_points: try: appendFilter.MergePointsOn() except: print 'ATTENTION : La version de VTK ne permet pas de merger les points (vtkAppendFilter.MergePointsOn)' else: appendFilter.MergePointsOff() for numbloc in get_numeros_blocs_non_vides(multibloc): try: appendFilter.AddInput(convertir_en_polydata(multibloc.GetBlock(numbloc))) except: appendFilter.AddInputData(convertir_en_polydata(multibloc.GetBlock(numbloc))) appendFilter.Update() multibloc = appendFilter.GetOutput() return multibloc #__________________________________________________________________________________________ 
#__________________________________________________________________________________________ def ajouter_constante(vtkDataObject, nom_constante, valeur): """fonction qui ajoute une constante en chacun des noeuds de l'objet vtk Retourne une copie de vtkDataObject (vtk_new_shallowcopy), avec la constante ajoutee vtkDataObject peut etre indiferemment mono-bloc ou multi-blocs, et consitute de n'importe quel type d'objet vtk qui possede la methode GetPointData() Typiquement, cette fonction peut etre utilisee pour ajouter une constante omega aux noeuds qui est necessaire au calcul des grandeurs ajouter_constante(vtkDataObject, 'omega', 0.0) UNE EXTENSION POURRA ETRE ENVISAGEE POUR PROPOSER D'AJOUTER LA CONSTANTE AUX CENTRES """ if isinstance(vtkDataObject, vtk.vtkMultiBlockDataSet): output = vtk.vtkMultiBlockDataSet() for numbloc in get_numeros_blocs_non_vides(vtkDataObject): output.SetBlock(numbloc, ajouter_constante(vtkDataObject.GetBlock(numbloc), nom_constante, valeur) ) else: output = vtk_new_shallowcopy(vtkDataObject) varray = numpy_support.numpy_to_vtk( numpy.ones(vtkDataObject.GetNumberOfPoints()) * valeur, deep = 1) varray.SetName(nom_constante) output.GetPointData().AddArray(varray) return output #__________________________________________________________________________________________ #__________________________________________________________________________________________ def supprimer_array(vtkDataObject, nom_array, loc='points'): """fonction qui supprime un array """ if isinstance(vtkDataObject, vtk.vtkMultiBlockDataSet): output = vtk.vtkMultiBlockDataSet() for numbloc in get_numeros_blocs_non_vides(vtkDataObject): output.SetBlock(numbloc, supprimer_array(vtkDataObject.GetBlock(numbloc), nom_array, loc) ) else: output = vtk_new_shallowcopy(vtkDataObject) if loc == 'points': output.GetPointData().RemoveArray(nom_array) else: output.GetCellData().RemoveArray(nom_array) return output #__________________________________________________________________________________________ #__________________________________________________________________________________________ def ajouter_numpy_array_as_vtk_array(input, numpy_array, nom): """fonction qui ajoute un numpy array input peut etre un multibloc il faut alors que l'array concatene soit dans le meme ordre que ce qu'il etait en sortie de get_vtk_array_as_numpy_array typiquement, shape(nk, nj, ni),ravel() pour un multibloc dans le cas d'une variables vectorielle, le shape doit alors etre (nb_points, nb_composantes) (nb_cellules, nb_composantes) le numpy_array peut aussi etre donne en ligne (operation numpy.ravel), les composantes dans l'ordre [vx, vy, vz, vx, vy, ...] 
""" #conversion en numpy.float64 pour eviter des probleme ensuite avec numpy_to_vtk numpy_array = numpy.asarray(numpy_array, dtype=numpy.float64) #on determine en fonction de la longueur du numpy array si la donnee doit aller aux points ou aux cellules #multiblock n'a pas de fonction GetNumberOfCells, alors on somme a la main, en supposant #que c'est un multibloc a une seule profondeur if isinstance(input, vtk.vtkMultiBlockDataSet): nombre_cellules = sum( [input.GetBlock(numbloc).GetNumberOfCells() for numbloc in get_numeros_blocs_non_vides(input)] ) else: nombre_cellules = input.GetNumberOfCells() if numpy_array.size == input.GetNumberOfPoints(): loc = 'points' mode_vecteur = 0 elif numpy_array.size == nombre_cellules: loc = 'cells' mode_vecteur = 0 elif numpy_array.size == input.GetNumberOfPoints() * 3: loc = 'points' mode_vecteur = 1 elif numpy_array.size == nombre_cellules * 3: loc = 'cells' mode_vecteur = 1 else: raise Exception, "taille du numpy array ne correspond ni au nombre de points ni au nombre de cellules" if mode_vecteur == 1: numpy_array = numpy_array.reshape(-1, 3) # print 'nb cells', nombre_cellules # print 'narray size', numpy_array.size # print 'loc', loc # print 'mode_vecteur', mode_vecteur #print "Ajout de l'array numpy" #print 'localisation ', loc #print 'mode vecteur ', mode_vecteur #on cree output, meme instance que input output = vtk_new_shallowcopy(input) #cas multibloc if isinstance(input, vtk.vtkMultiBlockDataSet): num_blocs = get_numeros_blocs_non_vides(input) if loc == "points": liste_nb_par_bloc = [input.GetBlock(numbloc).GetNumberOfPoints() for numbloc in num_blocs] elif loc == "cells": liste_nb_par_bloc = [input.GetBlock(numbloc).GetNumberOfCells() for numbloc in num_blocs] liste_arg = [ numpy.arange( int(numpy.sum(liste_nb_par_bloc[:rang_bloc])), int(numpy.sum(liste_nb_par_bloc[:rang_bloc]) + liste_nb_par_bloc[rang_bloc]) ) for rang_bloc in range(len(num_blocs)) ] for rang_bloc in range(len(num_blocs)): narray = numpy_array[liste_arg[rang_bloc]] numbloc = num_blocs[rang_bloc] output.SetBlock(numbloc, ajouter_numpy_array_as_vtk_array( output.GetBlock(numbloc), narray, nom) ) #monobloc - polydata, structured ou autre else: numpy_array = numpy.ascontiguousarray(numpy_array) varray = numpy_support.numpy_to_vtk(numpy_array, deep = 1) varray.SetName(nom) if loc == 'points': # if len(numpy_array.shape) == 2 and numpy_array.shape[1] == 3: # output.GetPointData().SetVectors(varray) # output.GetPointData().SetActiveVectors(nom) # else: # output.GetPointData().AddArray(varray) output.GetPointData().AddArray(varray) output.GetPointData().Update() elif loc == "cells": # if len(numpy_array.shape) == 2 and numpy_array.shape[1] == 3: # output.GetCellData().SetVectors(varray) # output.GetCellData().SetActiveVectors(nom) # else: # output.GetCellData().AddArray(varray) output.GetCellData().AddArray(varray) output.GetCellData().Update() return output #__________________________________________________________________________________________ #__________________________________________________________________________________________ def VTKThreshold(input, nom_array, valeur_min=None, valeur_max=None, loc='points', UseContinuousCellRange=False): """Fonction Threshold avec nom_array multi ou monobloc indiquer seulement valeur_min pour threshold by min indiquer seulement valeur_max pour threshold by max indiquer les deux pour threshold between loc = 'points', sinon on utilise les donnees aux cellules amarsan complement 03 novembre 2017 modification de la fonction pour pouvoir 
clipper avec une coordonnee 'coordsX', 'coordsY', 'coordsZ' """ # dans le cas ou on veut Threshold avce une coordonnee spatiale, on ajoute d'abord cette coordonnee spatiale aux points if nom_array[:6] == 'coords': print 'ajout de la coordonnee {0} aux points'.format(nom_array) coords = get_vtk_array_as_numpy_array(input, 'coords') input = ajouter_numpy_array_as_vtk_array(input, coords[:, {'coordsX': 0, 'coordsY': 1, 'coordsZ': 2}[nom_array]], nom_array) #cas multibloc if isinstance(input, vtk.vtkMultiBlockDataSet): output = vtk_new_instance(input) for numbloc in get_numeros_blocs_non_vides(input): output.SetBlock(numbloc, VTKThreshold(input.GetBlock(numbloc), nom_array, valeur_min, valeur_max, loc) ) #cas monobloc else: if nom_array in get_noms_arrays_presents(input, loc = 'points'): input.GetPointData().SetActiveScalars(nom_array) input.GetCellData().SetActiveScalars(None) elif nom_array in get_noms_arrays_presents(input, loc = 'cells'): input.GetCellData().SetActiveScalars(nom_array) input.GetPointData().SetActiveScalars(None) else: raise IOError, "{0} n'est pas present dans input".format(nom_array) select = vtk.vtkThreshold() select.AllScalarsOff() select.UseContinuousCellRangeOn() if UseContinuousCellRange else select.UseContinuousCellRangeOff() vtk_set_input(select, input) if valeur_min != None and valeur_max != None: select.ThresholdBetween(valeur_min, valeur_max) elif valeur_min != None: select.ThresholdByUpper(valeur_min) elif valeur_max != None: select.ThresholdByLower(valeur_max) else: raise IOError, "indiquez valeur_min ou valeur_max" select.Update() output = select.GetOutput() return output #__________________________________________________________________________________________ #__________________________________________________________________________________________ def VTKBoxClip(input, xmin, xmax, ymin, ymax, zmin, zmax): """ clip avec une boite. """ print 'Fonction extremement lente, je ne sais pas pourquoi. (amarsan 3 novembre 2017)' if 0: clipper = vtk.vtkBoxClipDataSet() vtk_set_input(clipper, input) clipper.SetBoxClip(xmin, xmax, ymin, ymax, zmin, zmax) clipper.Update() return clipper.GetClippedOutput() return None #__________________________________________________________________________________________ #__________________________________________________________________________________________ def VTKSubset(input, i_gardes = None, j_gardes = None, k_gardes = None): """Fonction Subset avec les indices i, j, k multi ou monobloc, ne marche que sur un vtkStructuredGrid les valeurs des indices sont celles qu'on mettrait dans Paraview (donc attention en particulier au decalage de 1 par rapport a elsA !!) 
indice_gardes = [indice_min, indice_max], 3 paires d'indices a donner par defaut, si on ne donne pas d'info sur un indice, pas de decoupage dans cette direction """ #cas multibloc if isinstance(input, vtk.vtkMultiBlockDataSet): output = vtk_new_instance(input) for numbloc in get_numeros_blocs_non_vides(input): output.SetBlock(numbloc, VTKThreshold(input.GetBlock(numbloc), nom_array, valeur_min, valeur_max) ) #cas monobloc else: dim_gardees = list(input.GetExtent()) if i_gardes != None: dim_gardees[0], dim_gardees[1] = i_gardes[0], i_gardes[1] if j_gardes != None: dim_gardees[2], dim_gardees[3] = j_gardes[0], j_gardes[1] if k_gardes != None: dim_gardees[4], dim_gardees[5] = k_gardes[0], k_gardes[1] extract = vtkFiltersExtraction.vtkExtractGrid() vtk_set_input(extract, input) extract.SetVOI(dim_gardees) extract.Update() output = extract.GetOutput() return output #__________________________________________________________________________________________ #__________________________________________________________________________________________ def modifier_axe(objetVTK, nv_repere): """fonction qui modifie les axes (x,y,z) de l'objet VTK donne en entree sans modifier la facon dont il est indexe (i,j,k) Retourne un objet VTK avec le nouveau repere souhaite objetVTK peut etre mono-bloc ou multi-blocs Exemple d'utilisation : on veut intervertir les axes x et z dans tous les cas, l'ancien repere est represente par (x,y,z) = (1,2,3) On exprime le nouveau repere en fonction de l'ancien : x => z , y => y , z => x : cela se traduit par nv_repere = (3,2,1) On peut aussi changer le sens d'un axe. Exemple : Pour x => y , y => -z , z => -x : on donnera nv_repere = (2,-3,-1) FONCTION A COMPLETER ULTERIEUREMENT : ca marche actuellement pour un maillage, il faudra par la suite l'etendre a d'autres types de donnees comme des champs aeros, par exemple """ if isinstance(objetVTK, vtk.vtkMultiBlockDataSet): n = vtk.vtkMultiBlockDataSet() for numbloc in get_numeros_blocs_non_vides(objetVTK): n.SetBlock(numbloc, modifier_axe(objetVTK.GetBlock(numbloc), nv_repere) ) return n else: dims = objetVTK.GetDimensions() coords = get_vtk_array_as_numpy_array(objetVTK, "coords") coords = coords.reshape(dims[::-1] + (3,)) coords_new = numpy.concatenate( [ coords[:, :, :, abs(nv_repere[0]) - 1, None] * numpy.sign(nv_repere[0]), coords[:, :, :, abs(nv_repere[1]) - 1, None] * numpy.sign(nv_repere[1]), coords[:, :, :, abs(nv_repere[2]) - 1, None] * numpy.sign(nv_repere[2]) ], axis = 3 ) coords_new = numpy.array(coords_new) coords_new = coords_new.reshape( coords_new.size / 3, 3) vtk_array = numpy_support.numpy_to_vtk(coords_new, deep=1) point = vtk.vtkPoints() point.SetData(vtk_array) objetVTK = vtk_new_instance(objetVTK) objetVTK.SetPoints(point) objetVTK.SetDimensions(dims) return objetVTK #__________________________________________________________________________________________ #__________________________________________________________________________________________ def get_noms_blocs(multibloc): """Fonction qui retourne les noms des blocs d'un multibloc sous forme de liste. 
La position dans la liste correspond au numero du bloc """ liste = [] for numbloc in range(multibloc.GetNumberOfBlocks()): nom_bloc = multibloc.GetMetaData(numbloc).Get(vtk.vtkCompositeDataSet.NAME()) if nom_bloc is not None: liste.append(nom_bloc.replace('\n', '').replace(' ', '')) else: liste.append(nom_bloc) return liste #__________________________________________________________________________________________ #__________________________________________________________________________________________ def get_bloc_par_nom(multibloc, nom_bloc): """Fonction qui retourne les noms des blocs d'un multibloc sous forme de liste. La position dans la liste correspond au numero du bloc """ liste_noms = get_noms_blocs(multibloc) if not nom_bloc in liste_noms: raise Exception, 'Aucun bloc ne porte ce nom. Les noms sont ' + str(liste_noms) return multibloc.GetBlock(liste_noms.index(nom_bloc)) #__________________________________________________________________________________________ #__________________________________________________________________________________________ def get_numero_bloc_par_nom(multibloc, nom_bloc): """Fonction qui retourne les noms des blocs d'un multibloc sous forme de liste. La position dans la liste correspond au numero du bloc """ liste_noms = get_noms_blocs(multibloc) if not nom_bloc in liste_noms: raise Exception, 'Aucun bloc ne porte ce nom. Les noms sont ' + str(liste_noms) return liste_noms.index(nom_bloc) #__________________________________________________________________________________________ #__________________________________________________________________________________________ def create_bloc_structure_from_numpy_array(coords): """Fonction qui retourne un bloc structure a partir d'un numpy array qui contient les coordonnees des points. Le numpy array doit avoir les dimensions suivantes (ni, nj, nk, 3) """ #creation du bloc bloc = vtk.vtkStructuredGrid() #conversion du numpy array en vtk array vtkArray = numpy_support.numpy_to_vtk(numpy.ascontiguousarray( coords.transpose(2, 1, 0, 3).reshape(coords.size / 3, 3)), deep = 1) #creation des points points = vtk.vtkPoints() points.SetData(vtkArray) #remplissage du bloc bloc.SetDimensions(coords.shape[0], coords.shape[1], coords.shape[2]) bloc.SetPoints(points) return bloc #_____________________________________________________________________ #__________________________________________________________________________________________ def create_bloc_non_structure_from_numpy_array(coords, cells, cellstypes, cellslocations): """Fonction qui retourne un bloc non-structure a partir de numpy array Les entrees a donner sont : - coords : un numpy_array de dimension (nb_points, 3) qui contient les coordonnees des points. - cells : l'arbre de connectivite, qui definit les cellules. (nb_points_dans_cellule_1, indice_point_1, indice_point_2, indice_point_3, ..., nb_points_dans_cellule2, indice_point_, indice_point_, indice_point_, ...) 
- cellstypes : qui definit le type des cellules, une par une - cellslocations : qui definit la position de la definition de la cellules dans l'array <cells> """ #creation du bloc bloc = vtk.vtkUnstructuredGrid() #conversion du numpy array en vtk array vtkArray = numpy_support.numpy_to_vtk(numpy.ascontiguousarray(coords), deep = 1) #creation des points points = vtk.vtkPoints() points.SetData(vtkArray) #remplissage du bloc bloc.SetPoints(points) # definition des cellules vtkCells = vtk.vtkCellArray() vtkCells.SetCells(cellstypes.size, numpy_support.numpy_to_vtk(cells, deep = 1, array_type = vtk.vtkIdTypeArray().GetDataType())) bloc.SetCells( numpy_support.numpy_to_vtk(cellstypes, deep = 1, array_type = vtk.vtkUnsignedCharArray().GetDataType()), numpy_support.numpy_to_vtk(cellslocations, deep = 1, array_type = vtk.vtkIdTypeArray().GetDataType()), vtkCells ) # bloc.Update() # bloc.UpdateData() return bloc #_____________________________________________________________________ #__________________________________________________________________________________________ def create_polydata_from_numpy_array(coords, polys=None, nb_polys=None, isline=False): """Fonction qui retourne une surface polydata a partir de numpy array Les entrees a donner sont : - coords : un numpy_array de dimension (nb_points, 3) qui contient les coordonnees des points. - polys : les polydata = connectivite - nb_polys : le nombre de cellules - isline : dans le cas ou c'est une ligne qu'on veut creer, et non pas une surface. """ #creation du bloc bloc = vtk.vtkPolyData() #conversion du numpy array en vtk array vtkArray = numpy_support.numpy_to_vtk(numpy.ascontiguousarray(coords), deep = 1) #creation des points points = vtk.vtkPoints() points.SetData(vtkArray) #remplissage du bloc bloc.SetPoints(points) if polys is not None: if nb_polys is None: raise IOError, 'Indiquer le nombre total de facettes' # definition des polys vtkCells = vtk.vtkCellArray() vtkCells.SetCells(int(nb_polys), numpy_support.numpy_to_vtk( polys.ravel(), deep = 1, array_type = vtk.vtkIdTypeArray().GetDataType() )) if isline: bloc.SetLines(vtkCells) else: bloc.SetPolys(vtkCells) return bloc #_____________________________________________________________________ #_____________________________________________________________________ def extraire_surface(input, region_to_extract = None, angle_split=None): """fonction qui extrait les surfaces region_to_extract doit etre une liste d'entiers. 
si angle_split = None """ if isinstance(input, vtk.vtkMultiBlockDataSet): output = vtk_new_instance(input) for numbloc in get_numeros_blocs_non_vides(input): output.SetBlock(numbloc, extraire_surface(input.GetBlock(numbloc), region_to_extract, angle_split)) return output # extraction des surfaces exterieures f = vtkFiltersGeometry.vtkGeometryFilter() vtk_set_input(f, input) f.Update() surface = f.GetOutput() # print surface # calcul des normales f = vtk.vtkPolyDataNormals() if angle_split is not None: f.SplittingOn() f.SetFeatureAngle(angle_split) vtk_set_input(f, surface) f.Update() surface = f.GetOutput() # calcul de l'indice de connectivite f = vtk.vtkConnectivityFilter() vtk_set_input(f, surface) if region_to_extract is not None: f.SetExtractionModeToSpecifiedRegions() for id in region_to_extract: f.AddSpecifiedRegion(id) else: f.SetExtractionModeToAllRegions() f.ColorRegionsOn() f.Update() surface = f.GetOutput() # on passe ce filtre, parce que sinon les cellules sont supprimees, mais il reste les points dans le dataset if region_to_extract is not None: f = vtkFiltersGeometry.vtkDataSetSurfaceFilter() vtk_set_input(f, surface) f.Update() surface = f.GetOutput() return surface #_____________________________________________________________________ #_____________________________________________________________________ def ajouter_types_cellules(input): """fonction qui ajoute le type de cellules aux cellules """ # cas multibloc - appel recursif if isinstance(input, vtk.vtkMultiBlockDataSet): output = vtk_new_instance(input) for numbloc in get_numeros_blocs_non_vides(input): output.SetBlock(numbloc, ajouter_types_cellules(input.GetBlock(numbloc))) # cas monobloc else: cellstypes = get_vtk_array_as_numpy_array(input, 'cellstypes') output = ajouter_numpy_array_as_vtk_array(input, cellstypes, 'CellTypes') return output #_____________________________________________________________________ #_____________________________________________________________________ def extraire_blocs(input, liste_numeros): """fonction d'extraction des blocs d'un multibloc """ m = vtk.vtkMultiBlockDataSet() for numbloc in liste_numeros: m.SetBlock(numbloc, input.GetBlock(numbloc)) return m #_____________________________________________________________________
aurmarsan/pyturbo
fonctions_basiques.py
Python
mit
95,033
[ "ParaView", "VTK" ]
3f9f296a7a68ed339b76b2826f29dd870cc6d965c819db27acaa8852672c23ae
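The pyturbo helpers in the record above are meant to be composed: a structured block can be built from a plain numpy coordinate array, transformed, and read back as numpy data. The sketch below is illustrative only; it assumes the file is importable as fonctions_basiques (per the path field above) and that its own dependencies (vtk, numpy, numpy_support and the vtk_* helpers defined earlier in the file) resolve in a Python 2 / legacy-VTK environment. The grid dimensions and rotation angle are arbitrary.

import numpy
# Hypothetical import path, based on the record's path field.
from fonctions_basiques import (create_bloc_structure_from_numpy_array,
                                rotation, get_vtk_array_as_numpy_array)

# create_bloc_structure_from_numpy_array expects coordinates shaped (ni, nj, nk, 3).
ni, nj, nk = 5, 4, 3
i, j, k = numpy.mgrid[0:ni, 0:nj, 0:nk]
coords = numpy.empty((ni, nj, nk, 3), dtype=numpy.float64)
coords[..., 0], coords[..., 1], coords[..., 2] = i, j, k

bloc = create_bloc_structure_from_numpy_array(coords)

# Rotate the block by 30 degrees around z (axe=2), then pull the new point
# coordinates back as a (ni*nj*nk, 3) numpy array.
bloc_tourne = rotation(bloc, 30.0, axe=2)
new_coords = get_vtk_array_as_numpy_array(bloc_tourne, 'coords')
print(new_coords.shape)

Because rotation also rewrites every three-component point and cell array, any vector fields attached to the block stay consistent with the rotated geometry.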
import re from scrapy.spider import BaseSpider from scrapy.selector import HtmlXPathSelector from scrapy.http import Request from scrapy.utils.response import get_base_url from scrapy.utils.url import urljoin_rfc from scrapy.utils.response import open_in_browser from product_spiders.items import Product, ProductLoader import logging #user_agent = 'Mozilla/5.0 (Windows NT 5.1; rv:7.0.1) Gecko/20100101 Firefox/7.0.1' user_agent = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.52 Safari/536.5' class AmazonComSpider(BaseSpider): name = 'amazon.com_espresso' allowed_domains = ['amazon.com'] start_urls = ('http://www.amazon.com', ) headers = { 'User-agent': user_agent } form_headers = { 'User-agent': user_agent, 'Content-Type': 'application/x-www-form-urlencoded' } category_ids = [ '2251595011', '2251593011', '2251592011', '915194' ] search_url = "http://www.amazon.com/s/ref=nb_sb_noss?keywords=espresso&node=%%cat_id%%" def start_requests(self): for cat_id in self.category_ids: yield Request( self.search_url.replace("%%cat_id%%", cat_id), headers=self.headers, callback=self.parse ) def parse(self, response): URL_BASE = get_base_url(response) hxs = HtmlXPathSelector(response) total = hxs.select("//h2[@id='resultCount']//text()").re("Showing .*? - .*? of (.*?) Results") bottom = hxs.select("//h2[@id='resultCount']//text()").re("Showing (.*?) - .*? of .*? Results") top = hxs.select("//h2[@id='resultCount']//text()").re("Showing .*? - (.*?) of .*? Results") if total: total = int(total[0].replace(",", "")) logging.error("Total: %d" % total) if top and bottom: top = int(top[0].replace(",", "")) bottom = int(bottom[0].replace(",", "")) else: logging.error("No numbers!") logging.error("Top: %s" % top) logging.error("Bottom: %s" % bottom) return # parse products items = hxs.select("//div[contains(@class, 'result') and contains(@class, 'product')]") if not items: logging.error("ERROR! No products %s" % response.url) if top - bottom > 0: logging.error("Products exist but not found!") items_count = 0 counter = 0 for item in items: counter += 1 name = item.select("div[@class='data']/h3/a/text()").extract() if not name: name = item.select("div[@class='data']/h3/a/span/@title").extract() if not name: logging.error("ERROR! NO NAME! URL: %s" % response.url) continue name = name[0] logging.error("%d. Name: %s" % (counter, name)) url = item.select("div[@class='data']/h3/a/@href").extract() if not url: logging.error("ERROR! NO URL! URL: %s. NAME: %s" % (response.url, name)) continue url = url[0] url = urljoin_rfc(URL_BASE, url) logging.error("%d. URL: %s" % (counter, url)) price = item.select("div/div[contains(@class,'newPrice')]/span[contains(@class, 'price')]/text()").extract() if not price: price = item.select("div/div[@class='usedNewPrice']/span[@class='subPrice']/span[@class='price']/text()").extract() if not price: external = hxs.select(".//div[@class='prodAds']") if external: logging.error("External site") else: logging.error("ERROR! No price! URL: %s. NAME: %s" % (response.url, name)) continue price = price[0] logging.error("%d. 
Price: %s" % (counter, price)) l = ProductLoader(item=Product(), response=response) l.add_value('name', name) l.add_value('url', url) l.add_value('price', price) yield l.load_item() items_count += 1 logging.error("Found %d items" % len(items)) logging.error("Processed %d items" % items_count) # get current page number m = re.search("page=([\d]*)&", response.url) if not m: current_page_number = 0 else: current_page_number = int(m.group(1)) # pages pages = hxs.select("//span[@class='pagnLink']/a/@href").extract() for url in pages: m = re.search("page=([\d]*)&", url) if not m: continue else: page_number = int(m.group(1)) if page_number > current_page_number: request = Request( urljoin_rfc(URL_BASE, url), headers=self.headers, callback=self.parse ) yield request ## parse pages #if len(items) > 0: ## process next page #page_param_index = response.url.find("page=") #if page_param_index > -1: ## page > 1 #page_param_index += len("page=") #current_page = int(response.url[page_param_index:]) #next_page_url = response.url[:page_param_index] + str(current_page + 1) #else: #next_page_url = response.url + "&page=" + str(2) #request = Request(urljoin_rfc(URL_BASE, next_page_url), callback=self.parse) #yield request
0--key/lib
portfolio/Python/scrapy/seattlecoffeegear/amazonespresso.py
Python
apache-2.0
5,770
[ "ESPResSo" ]
0043d400718f955c7dfb167dc68bbbfdf59a401a8293318f2d1700193dd03972
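The spider's pagination handling hinges on pulling the page= query parameter out of result URLs with a regular expression and only following links that move forward. The small standalone sketch below shows the same extraction logic outside Scrapy; the URLs are made up for illustration.

import re

def page_number(url):
    # Same pattern the spider uses: grab the digits following "page=".
    m = re.search(r"page=([\d]*)&", url)
    return int(m.group(1)) if m else 0

current = page_number("http://www.amazon.com/s/?keywords=espresso&node=915194")
candidate = page_number("http://www.amazon.com/s/?page=3&keywords=espresso&node=915194")

# Only schedule pages with a higher number, mirroring the spider's check.
if candidate > current:
    print("would schedule page", candidate)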
from .variables import (GlobalScoreVariable, LocalScoreVariable,
                        LocalStackVariable, GlobalNbtVariable)
from .optimizers import Optimizer, TopVisitor, FuncVisitor

from commands import Var


class Allocator(TopVisitor):

    def __init__(self, namespace):
        self.ns = namespace

    def visit(self, top):
        return super().visit(top)

    def visit_global(self, name, var):
        if var.proxy_set:
            # Assembler and dynamic linker already allocate
            return name, var
        namespace = var.proxy_ns or self.ns
        # Global names are unique within the ns so use that uniqueness for
        # objectives and nbt keys
        if var.type.isnumeric:
            vref = Var('g_%s' % name, namespace)
            real_var = GlobalScoreVariable(var.type, vref)
        else:
            real_var = GlobalNbtVariable(var.type, namespace, name)
        var.set_proxy(real_var)
        return name, var

    def visit_function(self, name, func):
        FuncAllocator().visit(func)
        return name, func


class FuncAllocator(FuncVisitor):

    def visit(self, func):
        # Special case for assembler - it already finalizes
        if func.finalized:
            return
        self.reg_offset = 0
        self.nbt_offset = 0
        self.use_scores = True
        self.ns = func.namespace
        super().visit(func)
        func.variables_finalized()

    def visit_local_var(self, name, var):
        if self.use_scores and var.type.isnumeric:
            reg = Var('reg_%d' % self.reg_offset, self.ns)
            var.set_proxy(LocalScoreVariable(var.type, reg))
            if self.reg_offset >= 4:
                self.use_scores = False
            self.reg_offset += 1
        else:
            var.set_proxy(LocalStackVariable(var.type, self.ns, self.nbt_offset))
            self.nbt_offset += 1
        return name, var


def default_allocation(top, opt_level, namespace):
    if opt_level:
        optimizer = Optimizer()
        optimizer.optimize(top)
        Allocator(namespace).visit(top)
        optimizer.optimize(top)
    else:
        Allocator(namespace).visit(top)
simon816/Command-Block-Assembly
cmd_ir/allocator.py
Python
mit
2,195
[ "VisIt" ]
3c0b444ab55ada658642d9511014e24fc3ce02bac083630dde7d8e84d57b4617
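The FuncAllocator above maps the first few numeric locals onto scoreboard registers (reg_0 ... reg_4) and spills everything else, including all non-numeric variables, to an NBT stack slot. Below is a self-contained sketch of that placement policy using plain tuples instead of the repository's variable classes; the function name and variable names are illustrative only.

def allocate_locals(local_vars, max_registers=5):
    """Mirror of FuncAllocator.visit_local_var: (name, is_numeric) -> placement."""
    placements = {}
    reg_offset = 0
    nbt_offset = 0
    use_scores = True
    for name, is_numeric in local_vars:
        if use_scores and is_numeric:
            placements[name] = ('score', 'reg_%d' % reg_offset)
            if reg_offset >= max_registers - 1:
                use_scores = False  # registers exhausted, spill the rest
            reg_offset += 1
        else:
            placements[name] = ('nbt_stack', nbt_offset)
            nbt_offset += 1
    return placements

print(allocate_locals([('a', True), ('b', True), ('msg', False),
                       ('c', True), ('d', True), ('e', True), ('f', True)]))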
#!/usr/bin/env python ################################################################################ # Copyright (C) 2014, 2015 GenAP, McGill University and Genome Quebec Innovation Centre # # This file is part of MUGQIC Pipelines. # # MUGQIC Pipelines is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # MUGQIC Pipelines is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with MUGQIC Pipelines. If not, see <http://www.gnu.org/licenses/>. ################################################################################ # Python Standard Modules import logging import math import os import re import sys # Append mugqic_pipelines directory to Python library path sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0]))))) # MUGQIC Modules from core.config import * from core.job import * from core.pipeline import * from bfx.readset import * from bfx.sequence_dictionary import * from bfx import bvatools from bfx import bwa from bfx import gatk from bfx import gq_seq_utils from bfx import igvtools from bfx import metrics from bfx import picard from bfx import samtools from bfx import snpeff from bfx import tools from bfx import vcftools from pipelines import common log = logging.getLogger(__name__) class DnaSeq(common.Illumina): """ DNA-Seq Pipeline ================ The standard MUGQIC DNA-Seq pipeline uses BWA to align reads to the reference genome. Treatment and filtering of mapped reads approaches as INDEL realignment, mark duplicate reads, recalibration and sort are executed using Picard and GATK. Samtools MPILEUP and bcftools are used to produce the standard SNP and indels variants file (VCF). Additional SVN annotations mostly applicable to human samples include mappability flags, dbSNP annotation and extra information about SVN by using published databases. The SNPeff tool is used to annotate variants using an integrated database of functional predictions from multiple algorithms (SIFT, Polyphen2, LRT and MutationTaster, PhyloP and GERP++, etc.) and to calculate the effects they produce on known genes. A list of effects and annotations that SnpEff calculate can be found [here](http://snpeff.sourceforge.net/faq.html#What_effects_are_predicted?). A summary html report is automatically generated by the pipeline. This report contains description of the sequencing experiment as well as a detailed presentation of the pipeline steps and results. Various Quality Control (QC) summary statistics are included in the report and additional QC analysis is accessible for download directly through the report. The report includes also the main references of the software and methods used during the analysis, together with the full list of parameters that have been passed to the pipeline main script. An example of the DNA-Seq report for an analysis on public data is available for illustration purpose only: [DNA-Seq report](http://gqinnovationcenter.com/services/bioinformatics/tools/dnaReport/index.html). 
[Here](https://bitbucket.org/mugqic/mugqic_pipelines/downloads/MUGQIC_Bioinfo_DNA-Seq.pptx) is more information about DNA-Seq pipeline that you may find interesting. """ @property def sequence_dictionary(self): if not hasattr(self, "_sequence_dictionary"): self._sequence_dictionary = parse_sequence_dictionary_file(config.param('DEFAULT', 'genome_dictionary', type='filepath')) return self._sequence_dictionary def bwa_mem_picard_sort_sam(self): """ The filtered reads are aligned to a reference genome. The alignment is done per sequencing readset. The alignment software used is [BWA](http://bio-bwa.sourceforge.net/) with algorithm: bwa mem. BWA output BAM files are then sorted by coordinate using [Picard](http://broadinstitute.github.io/picard/). This step takes as input files: 1. Trimmed FASTQ files if available 2. Else, FASTQ files from the readset file if available 3. Else, FASTQ output files from previous picard_sam_to_fastq conversion of BAM files """ jobs = [] for readset in self.readsets: trim_file_prefix = os.path.join("trim", readset.sample.name, readset.name + ".trim.") alignment_directory = os.path.join("alignment", readset.sample.name) readset_bam = os.path.join(alignment_directory, readset.name, readset.name + ".sorted.bam") # Find input readset FASTQs first from previous trimmomatic job, then from original FASTQs in the readset sheet if readset.run_type == "PAIRED_END": candidate_input_files = [[trim_file_prefix + "pair1.fastq.gz", trim_file_prefix + "pair2.fastq.gz"]] if readset.fastq1 and readset.fastq2: candidate_input_files.append([readset.fastq1, readset.fastq2]) if readset.bam: candidate_input_files.append([re.sub("\.bam$", ".pair1.fastq.gz", readset.bam), re.sub("\.bam$", ".pair2.fastq.gz", readset.bam)]) [fastq1, fastq2] = self.select_input_files(candidate_input_files) elif readset.run_type == "SINGLE_END": candidate_input_files = [[trim_file_prefix + "single.fastq.gz"]] if readset.fastq1: candidate_input_files.append([readset.fastq1]) if readset.bam: candidate_input_files.append([re.sub("\.bam$", ".single.fastq.gz", readset.bam)]) [fastq1] = self.select_input_files(candidate_input_files) fastq2 = None else: raise Exception("Error: run type \"" + readset.run_type + "\" is invalid for readset \"" + readset.name + "\" (should be PAIRED_END or SINGLE_END)!") job = concat_jobs([ Job(command="mkdir -p " + os.path.dirname(readset_bam)), pipe_jobs([ bwa.mem( fastq1, fastq2, read_group="'@RG" + \ "\tID:" + readset.name + \ "\tSM:" + readset.sample.name + \ "\tLB:" + (readset.library if readset.library else readset.sample.name) + \ ("\tPU:run" + readset.run + "_" + readset.lane if readset.run and readset.lane else "") + \ ("\tCN:" + config.param('bwa_mem', 'sequencing_center') if config.param('bwa_mem', 'sequencing_center', required=False) else "") + \ "\tPL:Illumina" + \ "'" ), picard.sort_sam( "/dev/stdin", readset_bam, "coordinate" ) ]) ], name="bwa_mem_picard_sort_sam." 
+ readset.name) jobs.append(job) report_file = os.path.join("report", "DnaSeq.bwa_mem_picard_sort_sam.md") jobs.append( Job( [os.path.join("alignment", readset.sample.name, readset.name, readset.name + ".sorted.bam") for readset in self.readsets], [report_file], [['bwa_mem_picard_sort_sam', 'module_pandoc']], command="""\ mkdir -p report && \\ pandoc --to=markdown \\ --template {report_template_dir}/{basename_report_file} \\ --variable scientific_name="{scientific_name}" \\ --variable assembly="{assembly}" \\ {report_template_dir}/{basename_report_file} \\ > {report_file}""".format( scientific_name=config.param('bwa_mem_picard_sort_sam', 'scientific_name'), assembly=config.param('bwa_mem_picard_sort_sam', 'assembly'), report_template_dir=self.report_template_dir, basename_report_file=os.path.basename(report_file), report_file=report_file ), report_files=[report_file], name="bwa_mem_picard_sort_sam_report") ) return jobs def picard_merge_sam_files(self): """ BAM readset files are merged into one file per sample. Merge is done using [Picard](http://broadinstitute.github.io/picard/). This step takes as input files: 1. Aligned and sorted BAM output files from previous bwa_mem_picard_sort_sam step if available 2. Else, BAM files from the readset file """ jobs = [] for sample in self.samples: alignment_directory = os.path.join("alignment", sample.name) # Find input readset BAMs first from previous bwa_mem_picard_sort_sam job, then from original BAMs in the readset sheet. readset_bams = self.select_input_files([[os.path.join(alignment_directory, readset.name, readset.name + ".sorted.bam") for readset in sample.readsets], [readset.bam for readset in sample.readsets]]) sample_bam = os.path.join(alignment_directory, sample.name + ".sorted.bam") mkdir_job = Job(command="mkdir -p " + os.path.dirname(sample_bam)) # If this sample has one readset only, create a sample BAM symlink to the readset BAM, along with its index. if len(sample.readsets) == 1: readset_bam = readset_bams[0] if os.path.isabs(readset_bam): target_readset_bam = readset_bam else: target_readset_bam = os.path.relpath(readset_bam, alignment_directory) readset_index = re.sub("\.bam$", ".bai", readset_bam) target_readset_index = re.sub("\.bam$", ".bai", target_readset_bam) sample_index = re.sub("\.bam$", ".bai", sample_bam) job = concat_jobs([ mkdir_job, Job([readset_bam], [sample_bam], command="ln -s -f " + target_readset_bam + " " + sample_bam, removable_files=[sample_bam]), Job([readset_index], [sample_index], command="ln -s -f " + target_readset_index + " " + sample_index, removable_files=[sample_index]) ], name="symlink_readset_sample_bam." + sample.name) elif len(sample.readsets) > 1: job = concat_jobs([ mkdir_job, picard.merge_sam_files(readset_bams, sample_bam) ]) job.name = "picard_merge_sam_files." + sample.name jobs.append(job) return jobs def gatk_indel_realigner(self): """ Insertion and deletion realignment is performed on regions where multiple base mismatches are preferred over indels by the aligner since it can appear to be less costly by the algorithm. Such regions will introduce false positive variant calls which may be filtered out by realigning those regions properly. Realignment is done using [GATK](https://www.broadinstitute.org/gatk/). The reference genome is divided by a number regions given by the `nb_jobs` parameter. """ jobs = [] nb_jobs = config.param('gatk_indel_realigner', 'nb_jobs', type='posint') if nb_jobs > 50: log.warning("Number of realign jobs is > 50. This is usually much. 
Anything beyond 20 can be problematic.") for sample in self.samples: alignment_directory = os.path.join("alignment", sample.name) realign_directory = os.path.join(alignment_directory, "realign") input = os.path.join(alignment_directory, sample.name + ".sorted.bam") if nb_jobs == 1: realign_prefix = os.path.join(realign_directory, "all") realign_intervals = realign_prefix + ".intervals" output_bam = realign_prefix + ".bam" sample_output_bam = os.path.join(alignment_directory, sample.name + ".realigned.qsorted.bam") jobs.append(concat_jobs([ Job(command="mkdir -p " + realign_directory, removable_files=[realign_directory]), gatk.realigner_target_creator(input, realign_intervals), gatk.indel_realigner(input, output_bam, target_intervals=realign_intervals), # Create sample realign symlink since no merging is required Job([output_bam], [sample_output_bam], command="ln -s -f " + os.path.relpath(output_bam, os.path.dirname(sample_output_bam)) + " " + sample_output_bam) ], name="gatk_indel_realigner." + sample.name)) else: # The first sequences are the longest to process. # Each of them must be processed in a separate job. unique_sequences_per_job = [sequence['name'] for sequence in self.sequence_dictionary[0:min(nb_jobs - 1, len(self.sequence_dictionary))]] # Create one separate job for each of the first sequences for sequence in unique_sequences_per_job: realign_prefix = os.path.join(realign_directory, sequence) realign_intervals = realign_prefix + ".intervals" intervals=[sequence] if unique_sequences_per_job.index(sequence) == 0: intervals.append("unmapped") output_bam = realign_prefix + ".bam" jobs.append(concat_jobs([ # Create output directory since it is not done by default by GATK tools Job(command="mkdir -p " + realign_directory, removable_files=[realign_directory]), gatk.realigner_target_creator(input, realign_intervals, intervals=[sequence]), gatk.indel_realigner(input, output_bam, target_intervals=realign_intervals, intervals=intervals) ], name="gatk_indel_realigner." + sample.name + "." + sequence)) # Create one last job to process the last remaining sequences and 'others' sequences realign_prefix = os.path.join(realign_directory, "others") realign_intervals = realign_prefix + ".intervals" output_bam = realign_prefix + ".bam" jobs.append(concat_jobs([ # Create output directory since it is not done by default by GATK tools Job(command="mkdir -p " + realign_directory, removable_files=[realign_directory]), gatk.realigner_target_creator(input, realign_intervals, exclude_intervals=unique_sequences_per_job), gatk.indel_realigner(input, output_bam, target_intervals=realign_intervals, exclude_intervals=unique_sequences_per_job) ], name="gatk_indel_realigner." + sample.name + ".others")) return jobs def merge_realigned(self): """ BAM files of regions of realigned reads are merged per sample using [Picard](http://broadinstitute.github.io/picard/). 
""" jobs = [] nb_jobs = config.param('gatk_indel_realigner', 'nb_jobs', type='posint') for sample in self.samples: alignment_directory = os.path.join("alignment", sample.name) realign_directory = os.path.join(alignment_directory, "realign") merged_realigned_bam = os.path.join(alignment_directory, sample.name + ".realigned.qsorted.bam") # if nb_jobs == 1, symlink has been created in indel_realigner and merging is not necessary if nb_jobs > 1: realigned_bams = [os.path.join(realign_directory, sequence['name'] + ".bam") for sequence in self.sequence_dictionary[0:min(nb_jobs - 1, len(self.sequence_dictionary))]] realigned_bams.append(os.path.join(realign_directory, "others.bam")) job = picard.merge_sam_files(realigned_bams, merged_realigned_bam) job.name = "merge_realigned." + sample.name jobs.append(job) report_file = os.path.join("report", "DnaSeq.gatk_indel_realigner.md") jobs.append( Job( [os.path.join("alignment", sample.name, sample.name + ".realigned.qsorted.bam") for sample in self.samples], [report_file], command="""\ mkdir -p report && \\ cp \\ {report_template_dir}/{basename_report_file} \\ {report_file}""".format( report_template_dir=self.report_template_dir, basename_report_file=os.path.basename(report_file), report_file=report_file ), report_files=[report_file], name="merge_realigned_report") ) return jobs def fix_mate_by_coordinate(self): """ Fix the read mates. Once local regions are realigned, the read mate coordinates of the aligned reads need to be recalculated since the reads are realigned at positions that differ from their original alignment. Fixing the read mate positions is done using [BVATools](https://bitbucket.org/mugqic/bvatools). """ jobs = [] for sample in self.samples: alignment_file_prefix = os.path.join("alignment", sample.name, sample.name + ".") input = alignment_file_prefix + "realigned.qsorted.bam" output_prefix = alignment_file_prefix + "matefixed.sorted" jobs.append(concat_jobs([ bvatools.groupfixmate(input, output_prefix + ".tmp.bam"), picard.sort_sam(output_prefix + ".tmp.bam", output_prefix+ ".bam"), ], name="fix_mate_by_coordinate." + sample.name)) report_file = os.path.join("report", "DnaSeq.fix_mate_by_coordinate.md") jobs.append( Job( [os.path.join("alignment", sample.name, sample.name + ".matefixed.sorted.bam") for sample in self.samples], [report_file], command="""\ mkdir -p report && \\ cp \\ {report_template_dir}/{basename_report_file} \\ {report_file}""".format( report_template_dir=self.report_template_dir, basename_report_file=os.path.basename(report_file), report_file=report_file ), report_files=[report_file], name="fix_mate_by_coordinate_report") ) return jobs def picard_mark_duplicates(self): """ Mark duplicates. Aligned reads per sample are duplicates if they have the same 5' alignment positions (for both mates in the case of paired-end reads). All but the best pair (based on alignment score) will be marked as a duplicate in the BAM file. Marking duplicates is done using [Picard](http://broadinstitute.github.io/picard/). """ jobs = [] for sample in self.samples: alignment_file_prefix = os.path.join("alignment", sample.name, sample.name + ".") input = alignment_file_prefix + "matefixed.sorted.bam" output = alignment_file_prefix + "sorted.dup.bam" metrics_file = alignment_file_prefix + "sorted.dup.metrics" job = picard.mark_duplicates([input], output, metrics_file) job.name = "picard_mark_duplicates." 
+ sample.name jobs.append(job) report_file = os.path.join("report", "DnaSeq.picard_mark_duplicates.md") jobs.append( Job( [os.path.join("alignment", sample.name, sample.name + ".sorted.dup.bam") for sample in self.samples], [report_file], command="""\ mkdir -p report && \\ cp \\ {report_template_dir}/{basename_report_file} \\ {report_file}""".format( report_template_dir=self.report_template_dir, basename_report_file=os.path.basename(report_file), report_file=report_file ), report_files=[report_file], name="picard_mark_duplicates_report") ) return jobs def recalibration(self): """ Recalibrate base quality scores of sequencing-by-synthesis reads in an aligned BAM file. After recalibration, the quality scores in the QUAL field in each read in the output BAM are more accurate in that the reported quality score is closer to its actual probability of mismatching the reference genome. Moreover, the recalibration tool attempts to correct for variation in quality with machine cycle and sequence context, and by doing so, provides not only more accurate quality scores but also more widely dispersed ones. """ jobs = [] for sample in self.samples: duplicate_file_prefix = os.path.join("alignment", sample.name, sample.name + ".sorted.dup.") input = duplicate_file_prefix + "bam" print_reads_output = duplicate_file_prefix + "recal.bam" base_recalibrator_output = duplicate_file_prefix + "recalibration_report.grp" jobs.append(concat_jobs([ gatk.base_recalibrator(input, base_recalibrator_output), gatk.print_reads(input, print_reads_output, base_recalibrator_output), Job(input_files=[print_reads_output], output_files=[print_reads_output + ".md5"], command="md5sum " + print_reads_output + " > " + print_reads_output + ".md5") ], name="recalibration." + sample.name)) report_file = os.path.join("report", "DnaSeq.recalibration.md") jobs.append( Job( [os.path.join("alignment", sample.name, sample.name + ".sorted.dup.recal.bam") for sample in self.samples], [report_file], command="""\ mkdir -p report && \\ cp \\ {report_template_dir}/{basename_report_file} \\ {report_file}""".format( report_template_dir=self.report_template_dir, basename_report_file=os.path.basename(report_file), report_file=report_file ), report_files=[report_file], name="recalibration_report") ) return jobs def metrics(self): """ Compute metrics and generate coverage tracks per sample. Multiple metrics are computed at this stage: Number of raw reads, Number of filtered reads, Number of aligned reads, Number of duplicate reads, Median, mean and standard deviation of insert sizes of reads after alignment, percentage of bases covered at X reads (%_bases_above_50 means the % of exons bases which have at least 50 reads) whole genome or targeted percentage of bases covered at X reads (%_bases_above_50 means the % of exons bases which have at least 50 reads). A TDF (.tdf) coverage track is also generated at this step for easy visualization of coverage in the IGV browser. """ ##check the library status library = {} for readset in self.readsets: if not library.has_key(readset.sample) : library[readset.sample]="SINGLE_END" if readset.run_type == "PAIRED_END" : library[readset.sample]="PAIRED_END" jobs = [] for sample in self.samples: recal_file_prefix = os.path.join("alignment", sample.name, sample.name + ".sorted.dup.recal.") input = recal_file_prefix + "bam" job = picard.collect_multiple_metrics(input, recal_file_prefix + "all.metrics", library_type=library[sample]) job.name = "picard_collect_multiple_metrics." 
+ sample.name jobs.append(job) # Compute genome coverage with GATK job = gatk.depth_of_coverage(input, recal_file_prefix + "all.coverage", bvatools.resolve_readset_coverage_bed(sample.readsets[0])) job.name = "gatk_depth_of_coverage.genome." + sample.name jobs.append(job) # Compute genome or target coverage with BVATools job = bvatools.depth_of_coverage( input, recal_file_prefix + "coverage.tsv", bvatools.resolve_readset_coverage_bed(sample.readsets[0]), other_options=config.param('bvatools_depth_of_coverage', 'other_options', required=False) ) job.name = "bvatools_depth_of_coverage." + sample.name jobs.append(job) job = igvtools.compute_tdf(input, input + ".tdf") job.name = "igvtools_compute_tdf." + sample.name jobs.append(job) return jobs def picard_calculate_hs_metrics(self): """ Compute on target percent of hybridisation based capture. """ jobs = [] created_interval_lists = [] for sample in self.samples: coverage_bed = bvatools.resolve_readset_coverage_bed(sample.readsets[0]) if coverage_bed: interval_list = re.sub("\.[^.]+$", ".interval_list", coverage_bed) if not interval_list in created_interval_lists: job = tools.bed2interval_list(None, coverage_bed, interval_list) job.name = "interval_list." + os.path.basename(coverage_bed) jobs.append(job) created_interval_lists.append(interval_list) recal_file_prefix = os.path.join("alignment", sample.name, sample.name + ".sorted.dup.recal.") job = picard.calculate_hs_metrics(recal_file_prefix + "bam", recal_file_prefix + "onTarget.tsv", interval_list) job.name = "picard_calculate_hs_metrics." + sample.name jobs.append(job) return jobs def gatk_callable_loci(self): """ Computes the callable region or the genome as a bed track. """ jobs = [] for sample in self.samples: alignment_file_prefix = os.path.join("alignment", sample.name, sample.name + ".") job = gatk.callable_loci(alignment_file_prefix + "sorted.dup.recal.bam", alignment_file_prefix + "callable.bed", alignment_file_prefix + "callable.summary.txt") job.name = "gatk_callable_loci." + sample.name jobs.append(job) return jobs def extract_common_snp_freq(self): """ Extracts allele frequencies of possible variants accross the genome. """ jobs = [] for sample in self.samples: alignment_file_prefix = os.path.join("alignment", sample.name, sample.name + ".") job = bvatools.basefreq(alignment_file_prefix + "sorted.dup.recal.bam", alignment_file_prefix + "commonSNPs.alleleFreq.csv", config.param('extract_common_snp_freq', 'common_snp_positions', type='filepath'), 0) job.name = "extract_common_snp_freq." + sample.name jobs.append(job) return jobs def baf_plot(self): """ Plots DepthRatio and B allele frequency of previously extracted alleles. """ jobs = [] for sample in self.samples: alignment_file_prefix = os.path.join("alignment", sample.name, sample.name + ".") job = bvatools.ratiobaf(alignment_file_prefix + "commonSNPs.alleleFreq.csv", alignment_file_prefix + "ratioBAF", config.param('baf_plot', 'common_snp_positions', type='filepath')) job.name = "baf_plot." + sample.name jobs.append(job) return jobs def gatk_haplotype_caller(self): """ GATK haplotype caller for snps and small indels. """ jobs = [] nb_haplotype_jobs = config.param('gatk_haplotype_caller', 'nb_jobs', type='posint') if nb_haplotype_jobs > 50: log.warning("Number of haplotype jobs is > 50. This is usually much. 
Anything beyond 20 can be problematic.") for sample in self.samples: alignment_directory = os.path.join("alignment", sample.name) haplotype_directory = os.path.join(alignment_directory, "rawHaplotypeCaller") input = os.path.join(alignment_directory, sample.name + ".sorted.dup.recal.bam") if nb_haplotype_jobs == 1: jobs.append(concat_jobs([ # Create output directory since it is not done by default by GATK tools Job(command="mkdir -p " + haplotype_directory,removable_files=[haplotype_directory]), gatk.haplotype_caller(input, os.path.join(haplotype_directory, sample.name + ".hc.g.vcf.bgz")) ], name="gatk_haplotype_caller." + sample.name)) else: unique_sequences_per_job,unique_sequences_per_job_others = split_by_size(self.sequence_dictionary, nb_haplotype_jobs - 1) # Create one separate job for each of the first sequences for idx,sequences in enumerate(unique_sequences_per_job): jobs.append(concat_jobs([ # Create output directory since it is not done by default by GATK tools Job(command="mkdir -p " + haplotype_directory,removable_files=[haplotype_directory]), gatk.haplotype_caller(input, os.path.join(haplotype_directory, sample.name + "." + str(idx) + ".hc.g.vcf.bgz"), intervals=sequences) ], name="gatk_haplotype_caller." + sample.name + "." + str(idx))) # Create one last job to process the last remaining sequences and 'others' sequences jobs.append(concat_jobs([ # Create output directory since it is not done by default by GATK tools Job(command="mkdir -p " + haplotype_directory,removable_files=[haplotype_directory]), gatk.haplotype_caller(input, os.path.join(haplotype_directory, sample.name + ".others.hc.g.vcf.bgz"), exclude_intervals=unique_sequences_per_job_others) ], name="gatk_haplotype_caller." + sample.name + ".others")) return jobs def merge_and_call_individual_gvcf(self): """ Merges the gvcfs of haplotype caller and also generates a per sample vcf containing genotypes. """ jobs = [] nb_haplotype_jobs = config.param('gatk_haplotype_caller', 'nb_jobs', type='posint') for sample in self.samples: haplotype_file_prefix = os.path.join("alignment", sample.name, "rawHaplotypeCaller", sample.name) output_haplotype_file_prefix = os.path.join("alignment", sample.name, sample.name) if nb_haplotype_jobs == 1: gvcfs_to_merge = [haplotype_file_prefix + ".hc.g.vcf.bgz"] else: unique_sequences_per_job,unique_sequences_per_job_others = split_by_size(self.sequence_dictionary, nb_haplotype_jobs - 1) gvcfs_to_merge = [haplotype_file_prefix + "." + str(idx) + ".hc.g.vcf.bgz" for idx in xrange(len(unique_sequences_per_job))] gvcfs_to_merge.append(haplotype_file_prefix + ".others.hc.g.vcf.bgz") jobs.append(concat_jobs([ gatk.cat_variants(gvcfs_to_merge, output_haplotype_file_prefix + ".hc.g.vcf.bgz"), gatk.genotype_gvcf([output_haplotype_file_prefix + ".hc.g.vcf.bgz"], output_haplotype_file_prefix + ".hc.vcf.bgz",config.param('gatk_merge_and_call_individual_gvcfs', 'options')) ], name="merge_and_call_individual_gvcf." + sample.name)) return jobs def combine_gvcf(self): """ Combine the per sample gvcfs of haplotype caller into one main file for all sample. 
""" jobs = [] nb_haplotype_jobs = config.param('gatk_combine_gvcf', 'nb_haplotype', type='posint') nb_maxbatches_jobs = config.param('gatk_combine_gvcf', 'nb_batch', type='posint') # merge all sample in one shot if nb_maxbatches_jobs == 1 : if nb_haplotype_jobs == 1: jobs.append(concat_jobs([ Job(command="mkdir -p variants"), gatk.combine_gvcf([ os.path.join("alignment", sample.name, sample.name)+".hc.g.vcf.bgz" for sample in self.samples ], os.path.join("variants", "allSamples.hc.g.vcf.bgz"))], name="gatk_combine_gvcf.AllSamples")) else : unique_sequences_per_job,unique_sequences_per_job_others = split_by_size(self.sequence_dictionary, nb_haplotype_jobs - 1) # Create one separate job for each of the first sequences for idx,sequences in enumerate(unique_sequences_per_job): obs.append(concat_jobs([ Job(command="mkdir -p variants",removable_files=[os.path.join("variants", "allSamples") + "." + str(idx) + ".hc.g.vcf.bgz",os.path.join("variants", "allSamples") + "." + str(idx) + ".hc.g.vcf.bgz.tbi"]), gatk.combine_gvcf([ os.path.join("alignment", sample.name, sample.name)+".hc.g.vcf.bgz" for sample in self.samples ], os.path.join("variants", "allSamples") + "." + str(idx) + ".hc.g.vcf.bgz", intervals=sequences) ], name="gatk_combine_gvcf.AllSample" + "." + str(idx))) # Create one last job to process the last remaining sequences and 'others' sequences job=gatk.combine_gvcf([ os.path.join("alignment", sample.name, sample.name)+".hc.g.vcf.bgz" for sample in self.samples ], os.path.join("alignment", "allSamples.others.hc.g.vcf.bgz"), exclude_intervals=unique_sequences_per_job_others) job.name="gatk_combine_gvcf.AllSample" + ".others" job.removable_files=[os.path.join("variants", "allSamples.others.hc.g.vcf.bgz"),os.path.join("variants", "allSamples.others.hc.g.vcf.bgz.tbi") ] jobs.append(job) else: #Combine samples by batch (pre-defined batches number in ini) sample_per_batch = int(math.ceil(len(self.samples)/float(nb_maxbatches_jobs))) batch_of_sample = [ self.samples[i:(i+sample_per_batch)] for i in range(0,len(self.samples),sample_per_batch) ] cpt = 0 batches = [] for batch in batch_of_sample : if nb_haplotype_jobs == 1: jobs.append(concat_jobs([ Job(command="mkdir -p variants",removable_files=[os.path.join("variants", "allSamples.batch" + str(cpt) + ".hc.g.vcf.bgz"),os.path.join("variants", "allSamples.batch" + str(cpt) + ".hc.g.vcf.bgz.tbi")]), gatk.combine_gvcf([ os.path.join("alignment", sample.name, sample.name)+".hc.g.vcf.bgz" for sample in batch ], os.path.join("variants", "allSamples.batch" + str(cpt) + ".hc.g.vcf.bgz")) ], name="gatk_combine_gvcf.AllSamples.batch" + str(cpt))) else : unique_sequences_per_job,unique_sequences_per_job_others = split_by_size(self.sequence_dictionary, nb_haplotype_jobs - 1) # Create one separate job for each of the first sequences for idx,sequences in enumerate(unique_sequences_per_job): jobs.append(concat_jobs([ Job(command="mkdir -p variants",removable_files=[os.path.join("variants", "allSamples") + ".batch" + str(cpt) + "." + str(idx) + ".hc.g.vcf.bgz",os.path.join("variants", "allSamples") + ".batch" + str(cpt) + "." + str(idx) + ".hc.g.vcf.bgz.tbi"]), gatk.combine_gvcf([ os.path.join("alignment", sample.name, sample.name)+".hc.g.vcf.bgz" for sample in batch ], os.path.join("variants", "allSamples") + ".batch" + str(cpt) + "." + str(idx) + ".hc.g.vcf.bgz", intervals=sequences) ], name="gatk_combine_gvcf.AllSample" + ".batch" + str(cpt) + "." 
+ str(idx))) # Create one last job to process the last remaining sequences and 'others' sequences job=gatk.combine_gvcf([ os.path.join("alignment", sample.name, sample.name)+".hc.g.vcf.bgz" for sample in batch ], os.path.join("variants", "allSamples" + ".batch" + str(cpt) + ".others.hc.g.vcf.bgz"), exclude_intervals=unique_sequences_per_job_others) job.name="gatk_combine_gvcf.AllSample" + ".batch" + str(cpt) + ".others" job.removable_files=[os.path.join("variants", "allSamples" + ".batch" + str(cpt) + ".others.hc.g.vcf.bgz"),os.path.join("variants", "allSamples" + ".batch" + str(cpt) + ".others.hc.g.vcf.bgz.tbi")] jobs.append(job) batches.append("batch" + str(cpt)) cpt = cpt + 1 #Combine batches altogether if nb_haplotype_jobs == 1: job=gatk.combine_gvcf([ os.path.join("variants", "allSamples." + batch_idx + ".hc.g.vcf.bgz") for batch_idx in batches ], os.path.join("variants", "allSamples.hc.g.vcf.bgz")) job.name="gatk_combine_gvcf.AllSamples.batches" jobs.append(job) else : unique_sequences_per_job,unique_sequences_per_job_others = split_by_size(self.sequence_dictionary, nb_haplotype_jobs - 1) # Create one separate job for each of the first sequences for idx,sequences in enumerate(unique_sequences_per_job): job=gatk.combine_gvcf([ os.path.join("variants", "allSamples." + batch_idx + "." + str(idx) + ".hc.g.vcf.bgz") for batch_idx in batches ], os.path.join("variants", "allSamples") + "." + str(idx) + ".hc.g.vcf.bgz", intervals=sequences) job.name="gatk_combine_gvcf.AllSample" + "." + str(idx) job.removable_files=[os.path.join("variants", "allSamples") + "." + str(idx) + ".hc.g.vcf.bgz",os.path.join("variants", "allSamples") + "." + str(idx) + ".hc.g.vcf.bgz.tbi"] jobs.append(job) # Create one last job to process the last remaining sequences and 'others' sequences job=gatk.combine_gvcf([ os.path.join("variants", "allSamples." + batch_idx + ".others.hc.g.vcf.bgz") for batch_idx in batches ], os.path.join("variants", "allSamples" + ".others.hc.g.vcf.bgz"), exclude_intervals=unique_sequences_per_job_others) job.name="gatk_combine_gvcf.AllSample" + ".others" job.removable_files=[os.path.join("variants", "allSamples" + ".others.hc.g.vcf.bgz"),os.path.join("variants", "allSamples" + ".others.hc.g.vcf.bgz.tbi")] jobs.append(job) return jobs def merge_and_call_combined_gvcf(self): """ Merges the combined gvcfs and also generates a general vcf containing genotypes. """ jobs = [] nb_haplotype_jobs = config.param('gatk_combine_gvcf', 'nb_haplotype', type='posint') haplotype_file_prefix = os.path.join("variants","allSamples") output_haplotype = os.path.join("variants", "allSamples.hc.g.vcf.bgz") output_haplotype_genotyped = os.path.join("variants", "allSamples.hc.vcf.bgz") if nb_haplotype_jobs > 1: unique_sequences_per_job,unique_sequences_per_job_others = split_by_size(self.sequence_dictionary, nb_haplotype_jobs - 1) gvcfs_to_merge = [haplotype_file_prefix + "." + str(idx) + ".hc.g.vcf.bgz" for idx in xrange(len(unique_sequences_per_job))] gvcfs_to_merge.append(haplotype_file_prefix + ".others.hc.g.vcf.bgz") job = gatk.cat_variants(gvcfs_to_merge, output_haplotype) job.name = "merge_and_call_combined_gvcf.merge.AllSample" jobs.append(job) job = gatk.genotype_gvcf([output_haplotype], output_haplotype_genotyped ,config.param('gatk_merge_and_call_combined_gvcfs', 'options')) job.name = "merge_and_call_combined_gvcf.call.AllSample" jobs.append(job) return jobs def variant_recalibrator(self): """ GATK VariantRecalibrator. 
The purpose of the variant recalibrator is to assign a well-calibrated probability to each variant call in a call set. You can then create highly accurate call sets by filtering based on this single estimate for the accuracy of each call. The approach taken by variant quality score recalibration is to develop a continuous, covarying estimate of the relationship between SNP call annotations (QD, MQ, HaplotypeScore, and ReadPosRankSum, for example) and the probability that a SNP is a true genetic variant versus a sequencing or data processing artifact. This model is determined adaptively based on "true sites" provided as input, typically HapMap 3 sites and those sites found to be polymorphic on the Omni 2.5M SNP chip array. This adaptive error model can then be applied to both known and novel variation discovered in the call set of interest to evaluate the probability that each call is real. The score that gets added to the INFO field of each variant is called the VQSLOD. It is the log odds ratio of being a true variant versus being false under the trained Gaussian mixture model. Using the tranche file generated by the previous step the ApplyRecalibration walker looks at each variant's VQSLOD value and decides which tranche it falls in. Variants in tranches that fall below the specified truth sensitivity filter level have their filter field annotated with its tranche level. This will result in a call set that simultaneously is filtered to the desired level but also has the information necessary to pull out more variants for a higher sensitivity but a slightly lower quality level. """ jobs = [] #generate the recalibration tranche files output_directory = "variants" recal_snps_other_options = config.param('variant_recalibrator', 'tranch_other_options_snps') recal_indels_other_options = config.param('variant_recalibrator', 'tranch_other_options_indels') variant_recal_snps_prefix = os.path.join(output_directory, "allSamples.hc.snps") variant_recal_indels_prefix = os.path.join(output_directory, "allSamples.hc.indels") jobs.append(concat_jobs([ Job(command="mkdir -p " + output_directory), gatk.variant_recalibrator( [os.path.join(output_directory, "allSamples.hc.vcf.bgz")], recal_snps_other_options, variant_recal_snps_prefix + ".recal", variant_recal_snps_prefix + ".tranches", variant_recal_snps_prefix + ".R"), gatk.variant_recalibrator( [os.path.join(output_directory, "allSamples.hc.vcf.bgz")], recal_indels_other_options, variant_recal_indels_prefix + ".recal", variant_recal_indels_prefix + ".tranches", variant_recal_indels_prefix + ".R") ], name="variant_recalibrator.tranch.allSamples")) #aply the recalibration apply_snps_other_options = config.param('variant_recalibrator', 'apply_other_options_snps') apply_indels_other_options = config.param('variant_recalibrator', 'apply_other_options_indels') variant_apply_snps_prefix = os.path.join(output_directory, "allSamples.hc.snps") variant_apply_indels_prefix = os.path.join(output_directory, "allSamples.hc.indels") jobs.append(concat_jobs([ Job(command="mkdir -p " + output_directory), gatk.apply_recalibration( os.path.join(output_directory, "allSamples.hc.vcf.bgz"), variant_apply_snps_prefix + ".recal", variant_apply_snps_prefix + ".tranches", apply_snps_other_options, variant_apply_snps_prefix + "_raw_indels.genotyped.vqsr.vcf.bgz"), gatk.apply_recalibration( variant_apply_snps_prefix + "_raw_indels.genotyped.vqsr.vcf.bgz", variant_apply_indels_prefix + ".recal", variant_apply_indels_prefix + ".tranches", apply_indels_other_options, 
os.path.join(output_directory, "allSamples.hc.vqsr.vcf")) ], name="variant_recalibrator.apply.allSamples")) return jobs def dna_sample_metrics(self): """ Merge metrics. Read metrics per sample are merged at this step. """ #get library type library = "SINGLE_END" for readset in self.readsets: if readset.run_type == "PAIRED_END" : library="PAIRED_END" trim_metrics_file = os.path.join("metrics", "trimSampleTable.tsv") metrics_file = os.path.join("metrics", "SampleMetrics.stats") report_metrics_file = os.path.join("report", "sequenceAlignmentTable.tsv") report_file = os.path.join("report", "DnaSeq.dna_sample_metrics.md") job = concat_jobs([ Job(command="mkdir -p metrics"), metrics.dna_sample_metrics("alignment", metrics_file, library), Job( [metrics_file], [report_file], [['dna_sample_metrics', 'module_pandoc']], # Ugly awk to merge sample metrics with trim metrics if they exist; knitr may do this better command="""\ mkdir -p report && \\ if [[ -f {trim_metrics_file} ]] then awk -F"\t" 'FNR==NR{{raw_reads[$1]=$2; surviving_reads[$1]=$3; surviving_pct[$1]=$4; next}}{{OFS="\t"; if ($2=="Mapped Reads"){{mapped_pct="Mapped %"}} else {{mapped_pct=($2 / surviving_reads[$1] * 100)}}; printf $1"\t"raw_reads[$1]"\t"surviving_reads[$1]"\t"surviving_pct[$1]"\t"$2"\t"mapped_pct; for (i = 3; i<= NF; i++) {{printf "\t"$i}}; print ""}}' \\ {trim_metrics_file} \\ {metrics_file} \\ > {report_metrics_file} else cp {metrics_file} {report_metrics_file} fi && \\ sequence_alignment_table_md=`if [[ -f {trim_metrics_file} ]] ; then cut -f1-10 {report_metrics_file} | LC_NUMERIC=en_CA awk -F "\t" '{{OFS="|"; if (NR == 1) {{$1 = $1; print $0; print "-----|-----:|-----:|-----:|-----:|-----:|-----:|-----:|-----:|-----"}} else {{print $1, sprintf("%\\47d", $2), sprintf("%\\47d", $3), sprintf("%.1f", $4), sprintf("%\\47d", $5), sprintf("%.1f", $6), sprintf("%\\47d", $7), sprintf("%\\47d", $8), sprintf("%.1f", $9), $10}}}}' ; else cut -f1-6 {report_metrics_file} | LC_NUMERIC=en_CA awk -F "\t" '{{OFS="|"; if (NR == 1) {{$1 = $1; print $0; print "-----|-----:|-----:|-----:|-----:|-----"}} else {{print $1, sprintf("%\\47d", $2), sprintf("%\\47d", $3), sprintf("%\\47d", $4), sprintf("%.1f", $5), $6}}}}' ; fi` pandoc \\ {report_template_dir}/{basename_report_file} \\ --template {report_template_dir}/{basename_report_file} \\ --variable sequence_alignment_table="$sequence_alignment_table_md" \\ --to markdown \\ > {report_file}""".format( report_template_dir=self.report_template_dir, trim_metrics_file=trim_metrics_file, metrics_file=metrics_file, basename_report_file=os.path.basename(report_file), report_metrics_file=report_metrics_file, report_file=report_file ), report_files=[report_file] ) ], name="dna_sample_metrics") job.input_files = [os.path.join("alignment", sample.name, sample.name + ".sorted.dup.metrics") for sample in self.samples] if library == "PAIRED_END" : job.input_files += [os.path.join("alignment", sample.name, sample.name + ".sorted.dup.recal.all.metrics.insert_size_metrics") for sample in self.samples] return [job] def generate_approximate_windows(self, nb_jobs): if nb_jobs <= len(self.sequence_dictionary): return [sequence['name'] + ":1-" + str(sequence['length']) for sequence in self.sequence_dictionary] else: total_length = sum([sequence['length'] for sequence in self.sequence_dictionary]) approximate_window_size = int(math.floor(total_length / (nb_jobs - len(self.sequence_dictionary)))) windows = [] for sequence in self.sequence_dictionary: for start, end in [[pos, min(pos + approximate_window_size - 1, 
sequence['length'])] for pos in range(1, sequence['length'] + 1, approximate_window_size)]: windows.append(sequence['name'] + ":" + str(start) + "-" + str(end)) return windows def rawmpileup(self): """ Full pileup (optional). A raw mpileup file is created using samtools mpileup and compressed in gz format. One packaged mpileup file is created per sample/chromosome. """ jobs = [] for sample in self.samples: mpileup_directory = os.path.join("alignment", sample.name, "mpileup") for sequence in self.sequence_dictionary: output = os.path.join(mpileup_directory, sample.name + "." + sequence['name'] + ".mpileup.gz") jobs.append(concat_jobs([ Job(command="mkdir -p " + mpileup_directory), pipe_jobs([ samtools.mpileup([os.path.join("alignment", sample.name, sample.name + ".sorted.dup.recal.bam")], None, config.param('rawmpileup', 'mpileup_other_options'), sequence['name']), Job(output_files=[output], command="gzip -1 -c > " + output) ])], name="rawmpileup." + sample.name + "." + sequence['name'])) return jobs def rawmpileup_cat(self): """ Merge mpileup files per sample/chromosome into one compressed gzip file per sample. """ jobs = [] for sample in self.samples: mpileup_file_prefix = os.path.join("alignment", sample.name, "mpileup", sample.name + ".") mpileup_inputs = [mpileup_file_prefix + sequence['name'] + ".mpileup.gz" for sequence in self.sequence_dictionary] gzip_output = mpileup_file_prefix + "mpileup.gz" job = Job(mpileup_inputs, [gzip_output]) job.command = "zcat \\\n " + " \\\n ".join(mpileup_inputs) + " | \\\n gzip -c --best > " + gzip_output job.name = "rawmpileup_cat." + sample.name jobs.append(job) return jobs def snp_and_indel_bcf(self): """ Mpileup and Variant calling. Variants (SNPs and INDELs) are called using [SAMtools](http://samtools.sourceforge.net/) mpileup. bcftools view is used to produce binary bcf files. """ jobs = [] input_bams = [os.path.join("alignment", sample.name, sample.name + ".sorted.dup.recal.bam") for sample in self.samples] nb_jobs = config.param('snp_and_indel_bcf', 'approximate_nb_jobs', type='posint') output_directory = "variants/rawBCF" if nb_jobs == 1: jobs.append(concat_jobs([ Job(command="mkdir -p " + output_directory), pipe_jobs([ samtools.mpileup(input_bams, None, config.param('snp_and_indel_bcf', 'mpileup_other_options')), samtools.bcftools_call("-", os.path.join(output_directory, "allSamples.bcf"), config.param('snp_and_indel_bcf', 'bcftools_other_options')), ])], name="snp_and_indel_bcf.allSamples")) else: for region in self.generate_approximate_windows(nb_jobs): jobs.append(concat_jobs([ Job(command="mkdir -p " + output_directory), pipe_jobs([ samtools.mpileup(input_bams, None, config.param('snp_and_indel_bcf', 'mpileup_other_options'), region), samtools.bcftools_call("-", os.path.join(output_directory, "allSamples." + region + ".bcf"), config.param('snp_and_indel_bcf', 'bcftools_other_options')), ])], name="snp_and_indel_bcf.allSamples." + re.sub(":", "_", region))) return jobs def merge_filter_bcf(self): """ bcftools is used to merge the raw binary variants files created in the snpAndIndelBCF step. The output of bcftools is fed to varfilter, which does an additional filtering of the variants and transforms the output into the VCF (.vcf) format. One vcf file contain the SNP/INDEL calls for all samples in the experiment. """ jobs = [] nb_jobs = config.param('snp_and_indel_bcf', 'approximate_nb_jobs', type='posint') if nb_jobs == 1: inputs = ["variants/rawBCF/allSamples.bcf"] else: inputs = ["variants/rawBCF/allSamples." 
+ region + ".bcf" for region in self.generate_approximate_windows(nb_jobs)] output_file_prefix = "variants/allSamples.merged." bcf = output_file_prefix + "bcf" jobs.append(concat_jobs([ samtools.bcftools_cat(inputs, bcf), samtools.bcftools_view(bcf, output_file_prefix + "flt.vcf") ], name = "merge_filter_bcf")) report_file = os.path.join("report", "DnaSeq.merge_filter_bcf.md") jobs.append( Job( [output_file_prefix + "flt.vcf"], [report_file], command="""\ mkdir -p report && \\ cp \\ {report_template_dir}/{basename_report_file} \\ {report_template_dir}/HumanVCFformatDescriptor.tsv \\ report/ && \\ sed 's/\t/|/g' report/HumanVCFformatDescriptor.tsv | sed '2i-----|-----' >> {report_file}""".format( report_template_dir=self.report_template_dir, basename_report_file=os.path.basename(report_file), report_file=report_file ), report_files=[report_file], name="merge_filter_bcf_report") ) return jobs def filter_nstretches(self, input_vcf = "variants/allSamples.merged.flt.vcf", output_vcf = "variants/allSamples.merged.flt.NFiltered.vcf", job_name = "filter_nstretches" ): """ The final .vcf files are filtered for long 'N' INDELs which are sometimes introduced and cause excessive memory usage by downstream tools. """ job = tools.filter_long_indel(input_vcf, output_vcf) job.name = job_name return [job] def haplotype_caller_filter_nstretches(self): """ See general filter_nstretches description ! Applied to haplotype caller vcf """ # Find input vcf first from VSQR, then from non recalibrate hapotype calleroriginal BAMs in the readset sheet. hc_vcf = self.select_input_files([["variants/allSamples.hc.vqsr.vcf"],["variants/allSamples.hc.vcf.bgz"]]) job = self.filter_nstretches(hc_vcf[0], "variants/allSamples.hc.vqsr.NFiltered.vcf", "haplotype_caller_filter_nstretches") return job def mpileup_filter_nstretches(self): """ See general filter_nstretches description ! Applied to mpileup vcf """ job = self.filter_nstretches("variants/allSamples.merged.flt.vcf", "variants/allSamples.merged.flt.NFiltered.vcf", "mpileup_filter_nstretches") return job def flag_mappability(self, input_vcf = "variants/allSamples.merged.flt.NFiltered.vcf", output_vcf = "variants/allSamples.merged.flt.mil.vcf" ,job_name = "flag_mappability" ): """ Mappability annotation. An in-house database identifies regions in which reads are confidently mapped to the reference genome. """ job = vcftools.annotate_mappability(input_vcf, output_vcf) job.name = job_name return [job] def haplotype_caller_flag_mappability(self) : """ See general flag_mappability ! Applied to haplotype caller vcf """ job = self.flag_mappability("variants/allSamples.hc.vqsr.NFiltered.vcf", "variants/allSamples.hc.vqsr.mil.vcf", "haplotype_caller_flag_mappability" ) return job def mpileup_flag_mappability(self) : """ See general flag_mappability ! Applied to mpileup vcf """ job = self.flag_mappability("variants/allSamples.merged.flt.NFiltered.vcf", "variants/allSamples.merged.flt.mil.vcf", "mpileup_flag_mappability") return job def snp_id_annotation(self, input_vcf = "variants/allSamples.merged.flt.mil.vcf", output_vcf = "variants/allSamples.merged.flt.mil.snpId.vcf" , job_name = "snp_id_annotation"): """ dbSNP annotation. The .vcf files are annotated for dbSNP using the software SnpSift (from the [SnpEff suite](http://snpeff.sourceforge.net/)). """ job = snpeff.snpsift_annotate(input_vcf, output_vcf) job.name = job_name return [job] def haplotype_caller_snp_id_annotation(self): """ See general snp_id_annotation ! 
Applied to haplotype caller vcf """ job = self.snp_id_annotation("variants/allSamples.hc.vqsr.mil.vcf", "variants/allSamples.hc.vqsr.mil.snpId.vcf", "haplotype_caller_snp_id_annotation") return job def mpileup_snp_id_annotation(self): """ See general snp_id_annotation ! Applied to mpileyp vcf """ job = self.snp_id_annotation("variants/allSamples.merged.flt.mil.vcf", "variants/allSamples.merged.flt.mil.snpId.vcf" , "mpileup_snp_id_annotation") return job def snp_effect(self, input_vcf = "variants/allSamples.merged.flt.mil.snpId.vcf", snpeff_file = "variants/allSamples.merged.flt.mil.snpId.snpeff.vcf", job_name = "snp_effect"): """ Variant effect annotation. The .vcf files are annotated for variant effects using the SnpEff software. SnpEff annotates and predicts the effects of variants on genes (such as amino acid changes). """ report_file = "report/DnaSeq.snp_effect.md" jobs = [] job = snpeff.compute_effects(input_vcf, snpeff_file, split=True) job.name = job_name jobs.append(job) jobs.append(Job( [snpeff_file], [report_file], command="""\ mkdir -p report && \\ cp \\ {report_template_dir}/{basename_report_file} \\ report/""".format( report_template_dir=self.report_template_dir, basename_report_file=os.path.basename(report_file), report_file=report_file ), report_files=[report_file], name = job_name + "_report" ) ) return jobs def haplotype_caller_snp_effect(self): """ See general snp_effect ! Applied to haplotype caller vcf """ jobs = self.snp_effect("variants/allSamples.hc.vqsr.mil.snpId.vcf", "variants/allSamples.hc.vqsr.mil.snpId.snpeff.vcf", "haplotype_caller_snp_effect") return jobs def mpileup_snp_effect(self): """ See general snp_effect ! Applied to mpileup vcf """ jobs = self.snp_effect("variants/allSamples.merged.flt.mil.snpId.vcf", "variants/allSamples.merged.flt.mil.snpId.snpeff.vcf", "mpileup_snp_effect") return jobs def dbnsfp_annotation(self, input_vcf = "variants/allSamples.merged.flt.mil.snpId.snpeff.vcf", output_vcf = "variants/allSamples.merged.flt.mil.snpId.snpeff.dbnsfp.vcf", job_name = "dbnsfp_annotation"): """ Additional SVN annotations. Provides extra information about SVN by using numerous published databases. Applicable to human samples. Databases available include Biomart (adds GO annotations based on gene information) and dbNSFP (an integrated database of functional annotations from multiple sources for the comprehensive collection of human non-synonymous SNPs. It compiles prediction scores from four prediction algorithms (SIFT, Polyphen2, LRT and MutationTaster), three conservation scores (PhyloP, GERP++ and SiPhy) and other function annotations). """ job = snpeff.snpsift_dbnsfp(input_vcf, output_vcf) job.name = job_name return [job] def haplotype_caller_dbnsfp_annotation(self): """ See general dbnsfp_annotation ! Applied to haplotype caller vcf """ job = self.dbnsfp_annotation("variants/allSamples.hc.vqsr.mil.snpId.snpeff.vcf", "variants/allSamples.hc.vqsr.mil.snpId.snpeff.dbnsfp.vcf", "haplotype_caller_dbnsfp_annotation") return job def mpileup_dbnsfp_annotation(self): """ See general dbnsfp_annotation ! Applied to mpileup vcf """ job = self.dbnsfp_annotation("variants/allSamples.merged.flt.mil.snpId.snpeff.vcf", "variants/allSamples.merged.flt.mil.snpId.snpeff.dbnsfp.vcf", "mpileup_dbnsfp_annotation") return job def metrics_vcf_stats(self, variants_file_prefix = "variants/allSamples.merged.flt.mil.snpId" , job_name = "metrics_change_rate"): """ Metrics SNV. 
Multiple metrics associated to annotations and effect prediction are generated at this step: change rate by chromosome, changes by type, effects by impact, effects by functional class, counts by effect, counts by genomic region, SNV quality, coverage, InDel lengths, base changes, transition-transversion rates, summary of allele frequencies, codon changes, amino acid changes, changes per chromosome, change rates. """ job = metrics.vcf_stats(variants_file_prefix + ".vcf", variants_file_prefix + ".snpeff.vcf.part_changeRate.tsv", variants_file_prefix + ".snpeff.vcf.statsFile.txt") job.name = job_name return [job] def haplotype_caller_metrics_vcf_stats(self): """ See general metrics_vcf_stats ! Applied to haplotype caller vcf """ job = self.metrics_vcf_stats("variants/allSamples.hc.vqsr.mil.snpId", "haplotype_caller_metrics_change_rate") return job def mpileup_metrics_vcf_stats(self): """ See general metrics_vcf_stats ! Applied to mpileup caller vcf """ job = self.metrics_vcf_stats("variants/allSamples.merged.flt.mil.snpId" , "mpileup_metrics_change_rate") return job def metrics_snv_graph_metrics(self, variants_file_prefix = "variants/allSamples.merged.flt.mil.snpId", snv_metrics_prefix = "metrics/allSamples.SNV", job_name = "metrics_snv_graph"): """ """ report_file = "report/DnaSeq.metrics_snv_graph_metrics.md" snv_metrics_files = [snv_metrics_prefix + ".SummaryTable.tsv", snv_metrics_prefix + ".EffectsFunctionalClass.tsv", snv_metrics_prefix + ".EffectsImpact.tsv"] job = metrics.snv_graph_metrics(variants_file_prefix + ".snpeff.vcf.statsFile.txt", snv_metrics_prefix) job.output_files = snv_metrics_files job.name = job_name return [concat_jobs([ job, Job( snv_metrics_files, [report_file], [[job_name + "_report", 'module_pandoc']], command="""\ mkdir -p report && \\ paste \\ <(echo -e "Number of variants before filter\nNumber of not variants\n%\nNumber of variants processed\nNumber of known variants\n%\nTransitions\nTransversions\nTs Tv ratio\nmissense\nnonsense\nsilent\nmissense silent ratio\nhigh impact\nlow impact\nmoderate impact\nmodifier impact") \\ <(paste \\ {snv_metrics_prefix}.SummaryTable.tsv \\ {snv_metrics_prefix}.EffectsFunctionalClass.tsv \\ <(sed '1d' {snv_metrics_prefix}.EffectsImpact.tsv) \\ | sed '1d' | sed 's/\t/\\n/g') \\ > report/SNV.SummaryTable.tsv snv_summary_table_md=`sed 's/\t/|/g' report/SNV.SummaryTable.tsv` pandoc \\ {report_template_dir}/{basename_report_file} \\ --template {report_template_dir}/{basename_report_file} \\ --variable snv_summary_table="$snv_summary_table_md" \\ --to markdown \\ > {report_file} for file in SNVQuality IndelLength CountRegions CountEffects BaseChange codonChange AminoAcidChange changeRate TsTv do for ext in jpeg pdf tsv do cp \\ {snv_metrics_prefix}.$file.$ext \\ report/SNV.$file.$ext done done cp {snv_metrics_prefix}.chromosomeChange.zip report/SNV.chromosomeChange.zip""".format( report_template_dir=self.report_template_dir, basename_report_file=os.path.basename(report_file), snv_metrics_prefix=snv_metrics_prefix, report_file=report_file ), report_files=[report_file] ) ], name=job_name + "_report")] def haplotype_caller_metrics_snv_graph_metrics(self): """ See general metrics_vcf_stats ! Applied to haplotype caller vcf """ jobs = self.metrics_snv_graph_metrics("variants/allSamples.hc.vqsr.mil.snpId", "metrics/allSamples.hc.vqsr.SNV", "haplotype_caller_metrics_snv_graph") return jobs def mpileup_metrics_snv_graph_metrics(self): """ See general metrics_vcf_stats ! 
Applied to mpileup vcf """ jobs = self.metrics_snv_graph_metrics("variants/allSamples.merged.flt.mil.snpId", "metrics/allSamples.mpileup.SNV", "mpileup_metrics_snv_graph") return jobs @property def steps(self): return [ self.picard_sam_to_fastq, self.trimmomatic, self.merge_trimmomatic_stats, self.bwa_mem_picard_sort_sam, self.picard_merge_sam_files, self.gatk_indel_realigner, self.merge_realigned, self.fix_mate_by_coordinate, self.picard_mark_duplicates, self.recalibration, self.metrics, self.picard_calculate_hs_metrics, self.gatk_callable_loci, self.extract_common_snp_freq, self.baf_plot, self.gatk_haplotype_caller, self.merge_and_call_individual_gvcf, self.combine_gvcf, self.merge_and_call_combined_gvcf, self.variant_recalibrator, self.dna_sample_metrics, self.haplotype_caller_filter_nstretches, self.haplotype_caller_flag_mappability, self.haplotype_caller_snp_id_annotation, self.haplotype_caller_snp_effect, self.haplotype_caller_dbnsfp_annotation, self.haplotype_caller_metrics_vcf_stats, self.haplotype_caller_metrics_snv_graph_metrics, self.rawmpileup, self.rawmpileup_cat, self.snp_and_indel_bcf, self.merge_filter_bcf, self.mpileup_filter_nstretches, self.mpileup_flag_mappability, self.mpileup_snp_id_annotation, self.mpileup_snp_effect, self.mpileup_dbnsfp_annotation, self.mpileup_metrics_vcf_stats, self.mpileup_metrics_snv_graph_metrics ] if __name__ == '__main__': DnaSeq()
ccmbioinfo/mugqic_pipelines
pipelines/dnaseq/dnaseq.py
Python
lgpl-3.0
67,158
[ "BWA", "Gaussian" ]
746fe582e44140b18ab5042b5ea485d6a92a428a64f6ab04440e1ab2e7fe1303
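The haplotype-caller and mpileup step methods in the record above are thin wrappers that pass caller-specific file prefixes to shared annotation/metrics methods, and the `steps` property exposes them in execution order. Below is a minimal sketch (not part of the pipeline source) of how that property could be consumed; it assumes `pipeline` is an already-configured DnaSeq instance, whereas in the real MUGQIC framework the Pipeline base class itself drives step execution and job scheduling.

def list_step_jobs(pipeline):
    # Call each declared step method in order and normalise its result to a list of jobs.
    for step in pipeline.steps:
        result = step()
        jobs = result if isinstance(result, list) else [result]
        print(step.__name__, '->', len(jobs), 'job(s)')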
# encoding: utf-8 # # Copyright © 2015 ATS Advanced Telematic Systems GmbH # # This file is part of Docker Launcher. # # Docker Launcher is free software: you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the Free # Software Foundation, either version 3 of the License, or (at your option) any # later version. # # Docker Launcher is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more # details. # # You should have received a copy of the GNU General Public License along with # Docker Launcher. If not, see <http://www.gnu.org/licenses/>. # """Some predefined variables for running ansible""" PLAYBOOK_LOCATION = 'tmp-playbook.yml' INVENTORY_LOCATION = 'tmp-inventory' GALAXY_INSTALL = ["ansible-galaxy", "install", "--ignore-errors", "defunctzombie.coreos-bootstrap", "-p", "roles"] GALAXY_REMOVE = ["ansible-galaxy", "remove", "defunctzombie.coreos-bootstrap", "-p", "roles"] ANSIBLE_CLI = ["ansible-playbook", "-i", "tmp-inventory", "tmp-playbook.yml"] ANSIBLE_VERBOSE_CLI = ["ansible-playbook", "-vvvv", "-i", "tmp-inventory", "tmp-playbook.yml"]
txus/docker-launcher
launcher/ansible/variables.py
Python
gpl-3.0
1,440
[ "Galaxy" ]
a780a17a0432979114661aeeb3755cc89090cd4394b94223bdb6137cc4d550af
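The module-level lists in the record above are plain argv-style command vectors, so they can be handed directly to the standard subprocess module. A hedged sketch of how Docker Launcher's calling code might use them (the calling code is not part of this record; the import path follows launcher/ansible/variables.py shown above):

import subprocess

from launcher.ansible import variables

subprocess.check_call(variables.GALAXY_INSTALL)       # install defunctzombie.coreos-bootstrap into ./roles
subprocess.check_call(variables.ANSIBLE_VERBOSE_CLI)  # run tmp-playbook.yml against tmp-inventory with -vvvv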
#ImportModules import ShareYourSystem as SYS from ShareYourSystem.Specials.Simulaters import Populater,Brianer #Definition MyBrianer=Brianer.BrianerClass( ).update( { 'StimulatingStepTimeFloat':0.1 } ).produce( ['E','I'], Populater.PopulaterClass, { 'PopulatingEquationStr': ''' dv/dt = (ge+gi-(v+49*mV))/(20*ms) : volt dge/dt = -ge/(5*ms) : volt dgi/dt = -gi/(10*ms) : volt ''', 'PopulatingThresholdStr':'v>-50*mV', 'PopulatingResetStr':'v=-60*mV', 'MoniteringStateTuplesList': [ ('v',[0,1],0.1) ], 'MoniteringSpikeTuplesList': [ () ], 'PopulatingInitDict': { 'v':-60. } }, **{'CollectingCollectionStr':'Populatome'} ).__setitem__( 'Dis_<Populatome>', [ { 'PopulatingUnitsInt':3200, 'ConnectingGraspClueVariablesList': [ SYS.GraspDictClass( { 'HintVariable':'/NodePointDeriveNoder/<Populatome>IPopulater', 'SynapsePreStr':'ge+=1.62*mV', 'SynapseProbabilityFloat':0.02, 'BrianClassStr':"Synapse" } ) ] }, { 'PopulatingUnitsInt':800, 'ConnectingGraspClueVariablesList': [ SYS.GraspDictClass( { 'HintVariable':'/NodePointDeriveNoder/<Populatome>EPopulater', 'SynapsePreStr':'gi-=9*mV', 'SynapseProbabilityFloat':0.02 } ) ] } ] ).brian() #Definition the AttestedStr SYS._attest( [ 'MyBrianer is '+SYS._str( MyBrianer, **{ 'RepresentingBaseKeyStrsList':False, 'RepresentingAlineaIsBool':False } ), ] ) #SYS._print(MyBrianer.BrianedMonitorsList[0].__dict__) #SYS._print( # MyBrianer.BrianedNeuronGroupsList[0].__dict__ #) #import matplotlib #plot(MyBrianer['<Connectome>FirstRater'].) #Print
Ledoux/ShareYourSystem
Pythonlogy/draft/Simulaters/Brianer/draft/01_ExampleCell copy.py
Python
mit
1,756
[ "Brian" ]
f46c37501146f405f2423a4ee50792791af60af00cd6741e77625f34db2e8000
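The PopulatingEquationStr wrapped by the ShareYourSystem example above is the classic Brian CUBA leaky integrate-and-fire model. For orientation only, here is a rough Brian2 rendering of the same neuron model, population sizes and synaptic weights; it is an independent, simplified sketch (connectivity reduced to the two cross-population projections named in the example) and is neither ShareYourSystem nor Brian 1 code.

from brian2 import NeuronGroup, Synapses, mV, ms

eqs = '''
dv/dt  = (ge + gi - (v + 49*mV)) / (20*ms) : volt
dge/dt = -ge / (5*ms)  : volt
dgi/dt = -gi / (10*ms) : volt
'''
E = NeuronGroup(3200, eqs, threshold='v > -50*mV', reset='v = -60*mV', method='euler')
I = NeuronGroup(800, eqs, threshold='v > -50*mV', reset='v = -60*mV', method='euler')
E.v = -60 * mV   # initial membrane potential, as in PopulatingInitDict
I.v = -60 * mV
exc = Synapses(E, I, on_pre='ge += 1.62*mV')  # excitatory projection onto the I population
exc.connect(p=0.02)
inh = Synapses(I, E, on_pre='gi -= 9*mV')     # inhibitory projection onto the E population
inh.connect(p=0.02)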
# This file is part of xrayutilities. # # xrayutilities is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, see <http://www.gnu.org/licenses/>. # # Copyright (C) 2017-2021 Dominik Kriegner <dominik.kriegner@gmail.com> """ module handling crystal lattice structures. A SGLattice consists of a space group number and the position of atoms specified as Wyckoff positions along with their parameters. Depending on the space group symmetry only certain parameters of the resulting instance will be settable! A cubic lattice for example allows only to set its 'a' lattice parameter but none of the other unit cell shape parameters. """ import copy import fractions import numbers import re from collections import OrderedDict from math import cos, isclose, radians, sin, sqrt import numpy import scipy.optimize from .. import config, cxrayutilities, math, utilities from ..exception import InputError from . import elements from .atom import Atom from .wyckpos import * # space group number to symmetry and number of parameters dictionary sgrp_sym = RangeDict({range(1, 3): ('triclinic', 6), range(3, 16): ('monoclinic', 4), range(16, 75): ('orthorhombic', 3), range(75, 143): ('tetragonal', 2), range(143, 168): ('trigonal', 2), range(168, 195): ('hexagonal', 2), range(195, 231): ('cubic', 1)}) sgrp_name = {'1': 'P1', '2': 'P-1', '3': 'P2', '4': 'P21', '5': 'C2', '6': 'Pm', '7': 'Pc', '8': 'Cm', '9': 'Cc', '10': 'P2/m', '11': 'P21/m', '12': 'C2/m', '13': 'P2/c', '14': 'P21/c', '15': 'C2/c', '16': 'P222', '17': 'P2221', '18': 'P21212', '19': 'P212121', '20': 'C2221', '21': 'C222', '22': 'F222', '23': 'I222', '24': 'I212121', '25': 'Pmm2', '26': 'Pmc21', '27': 'Pcc2', '28': 'Pma2', '29': 'Pca21', '30': 'Pnc2', '31': 'Pmn21', '32': 'Pba2', '33': 'Pna21', '34': 'Pnn2', '35': 'Cmm2', '36': 'Cmc21', '37': 'Ccc2', '38': 'Amm2', '39': 'Aem2', '40': 'Ama2', '41': 'Aea2', '42': 'Fmm2', '43': 'Fdd2', '44': 'Imm2', '45': 'Iba2', '46': 'Ima2', '47': 'Pmmm', '48': 'Pnnn', '49': 'Pccm', '50': 'Pban', '51': 'Pmma', '52': 'Pnna', '53': 'Pmna', '54': 'Pcca', '55': 'Pbam', '56': 'Pccn', '57': 'Pbcm', '58': 'Pnnm', '59': 'Pmmn', '60': 'Pbcn', '61': 'Pbca', '62': 'Pnma', '63': 'Cmcm', '64': 'Cmce', '65': 'Cmmm', '66': 'Cccm', '67': 'Cmme', '68': 'Ccce', '69': 'Fmmm', '70': 'Fddd', '71': 'Immm', '72': 'Ibam', '73': 'Ibca', '74': 'Imma', '75': 'P4', '76': 'P41', '77': 'P42', '78': 'P43', '79': 'I4', '80': 'I41', '81': 'P-4', '82': 'I-4', '83': 'P4/m', '84': 'P42/m', '85': 'P4/n', '86': 'P42/n', '87': 'I4/m', '88': 'I41/a', '89': 'P422', '90': 'P4212', '91': 'P4122', '92': 'P41212', '93': 'P4222', '94': 'P42212', '95': 'P4322', '96': 'P43212', '97': 'I422', '98': 'I4122', '99': 'P4mm', '100': 'P4bm', '101': 'P42cm', '102': 'P42nm', '103': 'P4cc', '104': 'P4nc', '105': 'P42mc', '106': 'P42bc', '107': 'I4mm', '108': 'I4cm', '109': 'I41md', '110': 'I41cd', '111': 'P-42m', '112': 'P-42c', '113': 'P-421m', '114': 'P-421c', '115': 'P-4m2', '116': 'P-4c2', '117': 'P-4b2', '118': 'P-4n2', '119': 'I-4m2', '120': 'I-4c2', '121': 
'I-42m', '122': 'I-42d', '123': 'P4/mmm', '124': 'P4/mcc', '125': 'P4/nbm', '126': 'P4/nnc', '127': 'P4/mbm', '128': 'P4/mnc', '129': 'P4/nmm', '130': 'P4/ncc', '131': 'P42/mmc', '132': 'P42/mcm', '133': 'P42/nbc', '134': 'P42/nnm', '135': 'P42/mbc', '136': 'P42/mnm', '137': 'P42/nmc', '138': 'P42/ncm', '139': 'I4/mmm', '140': 'I4/mcm', '141': 'I41/amd', '142': 'I41/acd', '143': 'P3', '144': 'P31', '145': 'P32', '146': 'R3', '147': 'P-3', '148': 'R-3', '149': 'P312', '150': 'P321', '151': 'P3112', '152': 'P3121', '153': 'P3212', '154': 'P3221', '155': 'R32', '156': 'P3m1', '157': 'P31m', '158': 'P3c1', '159': 'P31c', '160': 'R3m', '161': 'R3c', '162': 'P-31m', '163': 'P-31c', '164': 'P-3m1', '165': 'P-3c1', '166': 'R-3m', '167': 'R-3c', '168': 'P6', '169': 'P61', '170': 'P65', '171': 'P62', '172': 'P64', '173': 'P63', '174': 'P-6', '175': 'P6/m', '176': 'P63/m', '177': 'P622', '178': 'P6122', '179': 'P6522', '180': 'P6222', '181': 'P6422', '182': 'P6322', '183': 'P6mm', '184': 'P6cc', '185': 'P63cm', '186': 'P63mc', '187': 'P-6m2', '188': 'P-6c2', '189': 'P-62m', '190': 'P-62c', '191': 'P6/mmm', '192': 'P6/mcc', '193': 'P63/mcm', '194': 'P63/mmc', '195': 'P23', '196': 'F23', '197': 'I23', '198': 'P213', '199': 'I213', '200': 'Pm-3', '201': 'Pn-3', '202': 'Fm-3', '203': 'Fd-3', '204': 'Im-3', '205': 'Pa-3', '206': 'Ia-3', '207': 'P432', '208': 'P4232', '209': 'F432', '210': 'F4132', '211': 'I432', '212': 'P4332', '213': 'P4132', '214': 'I4132', '215': 'P-43m', '216': 'F-43m', '217': 'I-43m', '218': 'P-43n', '219': 'F-43c', '220': 'I-43d', '221': 'Pm-3m', '222': 'Pn-3n', '223': 'Pm-3n', '224': 'Pn-3m', '225': 'Fm-3m', '226': 'Fm-3c', '227': 'Fd-3m', '228': 'Fd-3c', '229': 'Im-3m', '230': 'Ia-3d'} sgrp_params = {'cubic:1': (('a', ), ('a', 'a', 'a', 90, 90, 90)), 'cubic:2': (('a', ), ('a', 'a', 'a', 90, 90, 90)), 'cubic': (('a', ), ('a', 'a', 'a', 90, 90, 90)), 'hexagonal': (('a', 'c'), ('a', 'a', 'c', 90, 90, 120)), 'trigonal:R': (('a', 'alpha'), ('a', 'a', 'a', 'alpha', 'alpha', 'alpha')), 'trigonal:H': (('a', 'c'), ('a', 'a', 'c', 90, 90, 120)), 'trigonal': (('a', 'c'), ('a', 'a', 'c', 90, 90, 120)), 'tetragonal:1': (('a', 'c'), ('a', 'a', 'c', 90, 90, 90)), 'tetragonal:2': (('a', 'c'), ('a', 'a', 'c', 90, 90, 90)), 'tetragonal': (('a', 'c'), ('a', 'a', 'c', 90, 90, 90)), 'orthorhombic:1': (('a', 'b', 'c'), ('a', 'b', 'c', 90, 90, 90)), 'orthorhombic:2': (('a', 'b', 'c'), ('a', 'b', 'c', 90, 90, 90)), 'orthorhombic': (('a', 'b', 'c'), ('a', 'b', 'c', 90, 90, 90)), 'monoclinic:b': (('a', 'b', 'c', 'beta'), ('a', 'b', 'c', 90, 'beta', 90)), 'monoclinic:c': (('a', 'b', 'c', 'gamma'), ('a', 'b', 'c', 90, 90, 'gamma')), 'monoclinic': (('a', 'b', 'c', 'beta'), ('a', 'b', 'c', 90, 'beta', 90)), 'triclinic': (('a', 'b', 'c', 'alpha', 'beta', 'gamma'), ('a', 'b', 'c', 'alpha', 'beta', 'gamma'))} # regular expression for splitting multiple reflection conditions hklcond_group = re.compile(r'([-hkil0-9\(\)]+): ([-+hklnor1-8=\s,]+)(?:, |$)') def get_possible_sgrp_suf(sgrp_nr): """ determine possible space group suffix. Multiple suffixes might be possible for one space group due to different origin choice, unique axis, or choice of the unit cell shape. 
Parameters ---------- sgrp_nr : int space group number Returns ------- str or list either an empty string or a list of possible valid suffix strings """ sgrp_suf = '' if sgrp_nr in [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]: sgrp_suf = [':b', ':c'] elif sgrp_nr in [48, 50, 59, 68, 70, 85, 86, 88, 125, 126, 129, 130, 133, 134, 137, 138, 141, 142, 201, 203, 222, 224, 227, 228]: sgrp_suf = [':1', ':2'] elif sgrp_nr in [146, 148, 155, 160, 161, 166, 167]: sgrp_suf = [':H', ':R'] return sgrp_suf def get_default_sgrp_suf(sgrp_nr): """ determine default space group suffix """ possibilities = get_possible_sgrp_suf(sgrp_nr) if possibilities: return possibilities[0] else: return '' def _get_pardict(parint, x): """ internal helper function to determine an parameter dictionary needed for the evaluation of Wyckoff positions Parameters ---------- parint : int integer number specifying the parameters (binary code, see implementation) x : tuple, list parameter values in an apropriate length list or tuple Returns ------- dict """ i = 0 pardict = {} if parint & 1: pardict['x'] = x[i] i += 1 if parint & 2: pardict['y'] = x[i] i += 1 if parint & 4: pardict['z'] = x[i] return pardict def testwp(parint, wp, cifpos, digits=config.DIGITS): """ test if a Wyckoff position can describe the given position from a CIF file Parameters ---------- parint : int telling which Parameters the given Wyckoff position has wp : str or tuple expression of the Wyckoff position cifpos : list, or tuple or array-like (x, y, z) position of the atom in the CIF file digits : int number of digits for which for a comparison of floating point numbers will be rounded to. By default xu.config.DIGITS is used. Returns ------- foundflag : bool flag to tell if the positions match pars : array-like or None parameters associated with the position or None if no parameters are needed """ def check_positions_match(p1, p2, digits): p1 = p1 - numpy.round(p1, digits) // 1 p2 = p2 - numpy.round(p2, digits) // 1 if numpy.round(p1, digits) == numpy.round(p2, digits): return True else: return False wyckp = wp.strip('()').split(',') # test agreement in positions witout variables match = [False, False, False] variables = [] for i in range(3): v = re.findall(r'[xyz]', wyckp[i]) if v == []: pos = eval(wyckp[i]) match[i] = check_positions_match(pos, cifpos[i], digits) if not match[i]: return False, None else: variables += v if all(match): return True, None # check if with proper choice of the variables a correspondence of the # positions can be obtained def fmin(x, parint, wyckp, cifpos): evalexp = [] cifp = [] for i in range(3): if not match[i]: evalexp.append(wyckp[i]) cifp.append(cifpos[i]) pardict = _get_pardict(parint, x) wpos = [eval(e, pardict) for e in evalexp] return numpy.linalg.norm(numpy.subtract(wpos, cifp)) x0 = [] if 'x' in variables: x0.append(cifpos[0]) if 'y' in variables: x0.append(cifpos[1]) if 'z' in variables: x0.append(cifpos[2]) opt = scipy.optimize.minimize(fmin, x0, args=(parint, wyckp, cifpos)) pardict = _get_pardict(parint, opt.x) for i in range(3): if not match[i]: pos = eval(wyckp[i], pardict) match[i] = check_positions_match(pos, cifpos[i], digits) if all(match): return True, list(opt.x) else: return False, None def get_wyckpos(sgrp, atompos): """ test all Wyckoff positions on every atomic position Parameters ---------- sgrp : str space group name atompos : list list of atomic positions to identify. All atomic positions are expected to belong to one and the same Wyckoff position! 
Returns ------- position argument for WyckoffBase.append """ for k, wyckpos in wp[sgrp].items(): parint, poslist, reflcond = wyckpos item = poslist[0] for pos in atompos: foundwp, par = testwp(parint, item, pos) if foundwp: return k if par is None else (k, list(par)) class WyckoffBase(list): """ The WyckoffBase class implements a container for a set of Wyckoff positions that form the base of a crystal lattice. An instance of this class can be treated as a simple container object. """ def __init__(self, *args, **kwargs): list.__init__(self, *args, **kwargs) @staticmethod def _checkatom(atom): if isinstance(atom, str): atom = getattr(elements, atom) elif not isinstance(atom, Atom): raise TypeError("atom must be an instance of class " "xrayutilities.materials.Atom") return atom @staticmethod def _checkpos(pos): if isinstance(pos, str): pos = (pos, None) elif isinstance(pos, (tuple, list)): if len(pos) == 1: pos = (pos[0], None) elif len(pos) == 2: if isinstance(pos[1], numbers.Number): pos = (pos[0], (pos[1], )) elif pos[1] is None: pos = (pos[0], None) else: pos = (pos[0], tuple(pos[1])) else: if isinstance(pos[1], numbers.Number): pos = (pos[0], tuple(pos[1:])) return pos def append(self, atom, pos, occ=1.0, b=0.): """ add new Atom to the lattice base Parameters ---------- atom : Atom object to be added pos : tuple or str Wyckoff position of the atom, along with its parameters. Examples: ('2i', (0.1, 0.2, 0.3)), or '1a' occ : float, optional occupancy (default=1.0) b : float, optional b-factor of the atom used as exp(-b*q**2/(4*pi)**2) to reduce the intensity of this atom (only used in case of temp=0 in StructureFactor and chi calculation) """ atom = self._checkatom(atom) pos = self._checkpos(pos) list.append(self, (atom, pos, occ, b)) def __setitem__(self, key, data): (atom, pos, occ, b) = data atom = self._checkatom(atom) pos = self._checkpos(pos) list.__setitem__(self, key, (atom, pos, float(occ), float(b))) def __copy__(self): """ since we use a custom 'append' method we need to overwrite copy """ cls = self.__class__ new = cls.__new__(cls) for item in self: new.append(*item) return new def __deepcopy__(self, memo): cls = self.__class__ new = cls.__new__(cls) memo[id(self)] = new for item in self: citem = copy.deepcopy(item, memo) new.append(*citem) return new def __str__(self): ostr = '' for i, (atom, p, occ, b) in enumerate(self): ostr += '%d: %s %s ' % (i, str(atom), p[0]) if p[1] is not None: ostr += ' '.join(map(utilities.frac2str, p[1])) ostr += ' occ=%5.3f b=%5.3f\n' % (occ, b) return ostr def __contains__(self, item): """ check if this list contains already the same element, at the same position, and with the same Debye Waller factor. The occupancy is not checked intentionally. 
Parameters ---------- item : tuple or list WyckoffBase entry to check if its present in this list Returns ------- bool """ for atom, p, occ, b in self: if (atom == item[0] and self.pos_eq(p, item[1]) and isclose(b, item[3], abs_tol=1e-4)): return True return False @staticmethod def entry_eq(e1, e2): """ compare two entries including all its properties to be equal Parameters ---------- e1, e2: tuple tuples with length 4 containing the entries of WyckoffBase which should be compared """ if (e1[0] == e2[0] and WyckoffBase.pos_eq(e1[1], e2[1]) and numpy.allclose(e1[2:], e2[2:], atol=1e-4)): return True return False @staticmethod def pos_eq(pos1, pos2): """ compare Wyckoff positions Parameters ---------- pos1, pos2: tuple tuples with Wyckoff label and optional parameters """ if pos1[0] != pos2[0]: return False if pos1[1] == pos2[1]: return True else: for f1, f2 in zip(pos1[1], pos2[1]): if not isclose(f1 % 1, f2 % 1, abs_tol=1e-5): return False return True def index(self, item): """ return the index of the atom (same element, position, and Debye Waller factor). The occupancy is not checked intentionally. If the item is not present a ValueError is raised. Parameters ---------- item : tuple or list WyckoffBase entry Returns ------- int """ for i, (atom, p, occ, b) in enumerate(self): if (atom == item[0] and self.pos_eq(p, item[1]) and isclose(b, item[3], abs_tol=1e-4)): return i raise ValueError("%s is not in list" % str(item)) class SymOp(object): """ Class descriping a symmetry operation in a crystal. The symmetry operation is characterized by a 3x3 transformation matrix as well as a 3-vector describing a translation. For magnetic symmetry operations also the time reversal symmetry can be specified (not used in xrayutilities) """ def __init__(self, D, t, m=1): """ Initialize the symmetry operation Parameters ---------- D : array-like transformation matrix (3x3) t : array-like translation vector (3) m : int, optional indicates time reversal in magnetic groups. +1 (default, no time reveral) or -1 """ self._W = numpy.zeros((4, 4)) self._W[:3, :3] = numpy.asarray(D) self._W[:3, 3] = numpy.asarray(t) self._W[3, 3] = 1 self._m = m @classmethod def from_xyz(cls, xyz): """ create a SymOp from the xyz notation typically used in CIF files. Parameters ---------- xyz : str string describing the symmetry operation (e.g. 
'-y, -x, z') """ D = numpy.zeros((3, 3)) t = numpy.array(eval(xyz, {'x': 0, 'y': 0, 'z': 0})[:3]) m = 1 for i, expr in enumerate(xyz.strip('()').split(',')): if i == 3: # time reversal property m = int(expr) continue if 'x' in expr: D[i, 0] = -1 if '-x' in expr else 1 if 'y' in expr: D[i, 1] = -1 if '-y' in expr else 1 if 'z' in expr: D[i, 2] = -1 if '-z' in expr else 1 return SymOp(D, t, m) def xyz(self, showtimerev=False): """ return the symmetry operation in xyz notation """ ret = '' t = self.t for i in range(3): expr = '' if abs(self._W[i, 0]) == 1: expr += '+x' if self._W[i, 0] == 1 else '-x' if abs(self._W[i, 1]) == 1: expr += '+y' if self._W[i, 1] == 1 else '-y' if abs(self._W[i, 2]) == 1: expr += '+z' if self._W[i, 2] == 1 else '-z' if t[i] != 0: expr += '+' if t[i] > 0 else '' expr += str(fractions.Fraction(t[i]).limit_denominator(100)) expr = expr.strip('+') ret += expr + ', ' if showtimerev: ret += '{:+d}'.format(self._m) return ret.strip(', ') @property def D(self): """transformation matrix of the symmetry operation""" return self._W[:3, :3] @property def t(self): """translation vector of the symmetry operation""" return self._W[:3, 3] def __eq__(self, other): if not isinstance(other, SymOp): return NotImplemented return self._m == other._m and numpy.all(self._W == other._W) @staticmethod def foldback(v): return v - numpy.round(v, config.DIGITS) // 1 def apply_rotation(self, vec): return self.D @ vec def apply(self, vec, foldback=True): lv = numpy.asarray(list(vec) + [1, ]) result = (self._W @ lv)[:3] if foldback: return self.foldback(result) return result def apply_axial(self, vec): return self._m * numpy.linalg.det(self.D) * self.D @ vec def combine(self, other): if not isinstance(other, SymOp): return NotImplemented W = self._W @ other._W return SymOp(W[:3, :3], self.foldback(W[:3, 3]), self._m*other._m) def __str__(self): return '({})'.format(self.xyz(showtimerev=True)) def __repr__(self): return self.__str__() class SGLattice(object): """ lattice object created from the space group number and corresponding unit cell parameters. atoms in the unit cell are specified by their Wyckoff position and their free parameters. """ def __init__(self, sgrp, *args, **kwargs): """ initialize class with space group number and atom list Parameters ---------- sgrp : int or str Space group number *args : float space group parameters. depending on the space group number this are 1 (cubic) to 6 (triclinic) parameters. cubic : a (lattice parameter). hexagonal : a, c. trigonal : a, c. tetragonal : a, c. orthorhombic : a, b, c. monoclinic : a, b, c, beta (in degree). triclinic : a, b, c, alpha, beta, gamma (in degree). atoms : list, optional list of elements either as Element object or string with the element name. If you specify atoms you have to also give the same number of Wyckoff positions pos : list, optional list of the atomic positions within the unit cell. This can be given as Wyckoff position along with its parameters or any position of an atom which will be used to identify the Wyckoff position. If a position has no free parameter the parameters can be omitted. Example: [('2i', (0.1, 0.2, 0.3)), '1a', (0, 0.5, 0)] occ : list, optional site occupation for the atoms. This is optional and defaults to 1 if not given. 
b : list, optional b-factor of the atom used as exp(-b*q**2/(4*pi)**2) to reduce the intensity of this atom (only used in case of temp=0 in StructureFactor and chi calculation) """ valid_kwargs = {'atoms': 'list of elements', 'pos': 'list of Wyckoff positions', 'occ': 'site occupations', 'b': 'Debye Waller exponents'} utilities.check_kwargs(kwargs, valid_kwargs, self.__class__.__name__) self.space_group = str(sgrp) self.space_group_nr = int(self.space_group.split(':')[0]) try: self.space_group_suf = ':' + self.space_group.split(':')[1] except IndexError: self.space_group_suf = get_default_sgrp_suf(self.space_group_nr) if self.space_group_suf != '': self.space_group = str(self.space_group_nr) + self.space_group_suf self.name = sgrp_name[str(self.space_group_nr)] + self.space_group_suf self.crystal_system, nargs = sgrp_sym[self.space_group_nr] self.crystal_system += self.space_group_suf if len(args) != nargs: raise ValueError('XU: number of parameters (%d) does not match the' ' crystal symmetry (%s:%d)' % (len(args), self.crystal_system, nargs)) self.free_parameters = OrderedDict() for a, par in zip(args, sgrp_params[self.crystal_system][0]): self.free_parameters[par] = a self._parameters = OrderedDict() for i, p in enumerate(('a', 'b', 'c', 'alpha', 'beta', 'gamma')): key = sgrp_params[self.crystal_system][1][i] if isinstance(key, str): self._parameters[p] = self.free_parameters[key] else: self._parameters[p] = key # define lattice vectors self._ai = numpy.zeros((3, 3)) self._bi = numpy.empty((3, 3)) a, b, c, alpha, beta, gamma = self._parameters.values() ra = radians(alpha) self._paramhelp = [cos(ra), cos(radians(beta)), cos(radians(gamma)), sin(ra), 0] self._setlat() # save general Wyckoff position self._gplabel = sorted(wp[self.space_group], key=lambda s: int(s[:-1]))[-1] self._gp = wp[self.space_group][self._gplabel] # set atom positions in the lattice base self._wbase = WyckoffBase() atoms = kwargs.get('atoms', None) wps = kwargs.get('pos', None) if atoms: occs = kwargs.get('occ', [1.0, ] * len(atoms)) bs = kwargs.get('b', [0.0, ] * len(atoms)) for at, wpos, o, b in zip(atoms, wps, occs, bs): if (not isinstance(wpos, (tuple, list, numpy.ndarray)) or len(wpos) < 3): self._wbase.append(at, wpos, o, b) else: # atomic position given -> identify Wyckoff position # find all equivalent positions gplist = set() for p in self._gp[1]: pos = eval(p, {'x': wpos[0], 'y': wpos[1], 'z': wpos[2]}) pos = SymOp.foldback(pos) gplist.add(tuple(pos)) wyckpos = get_wyckpos(self.space_group, gplist) if config.VERBOSITY >= config.INFO_LOW: print(f"XU.materials.SGLattice: position {wpos} " f"identified as {wyckpos}") self._wbase.append(at, wyckpos, o, b) self.nsites = len(self._wbase) # symmetry operations and reflection conditions placeholder self._hklmat = [] self._symops = [] self._hklcond = [] self._hklcond_wp = [] self._iscentrosymmetric = None @property def symops(self): """ return the set of symmetry operations from the general Wyckoff position of the space group. """ if self._symops == []: for p in self._gp[1]: self._symops.append(SymOp.from_xyz(p)) return self._symops @property def _hklsym(self): if self._hklmat == []: for s in self.symops: self._hklmat.append(numpy.round(self._qtransform.imatrix @ self._transform.matrix @ s.D @ self._transform.imatrix @ self._qtransform.matrix, config.DIGITS)) return self._hklmat def base(self): """ generator of atomic position within the unit cell. 
""" if not self._wbase: return sgwp = wp[self.space_group] for (atom, w, occ, b) in self._wbase: x, y, z = None, None, None parint, poslist, dummy = sgwp[w[0]] i = 0 if parint & 1: try: x = w[1][i] except TypeError: print('XU.materials: Wyckoff position %s of %s needs ' 'parameters (%d) -> wrong material definition' % (w[0], str(self.space_group), parint)) raise i += 1 if parint & 2: try: y = w[1][i] except TypeError: print('XU.materials: Wyckoff position %s of %s needs ' 'parameters (%d) -> wrong material definition' % (w[0], str(self.space_group), parint)) raise i += 1 if parint & 4: try: z = w[1][i] except TypeError: print('XU.materials: Wyckoff position %s of %s needs ' 'parameters (%d) -> wrong material definition' % (w[0], str(self.space_group), parint)) raise i += 1 if w[1]: if i != len(w[1]): raise TypeError('XU.materials: too many parameters for ' 'Wyckoff position') for p in poslist: pos = eval(p, {'x': x, 'y': y, 'z': z}) pos = SymOp.foldback(pos) yield atom, pos, occ, b def _setlat(self): a, b, c, alpha, beta, gamma = self._parameters.values() ca, cb, cg, sa, vh = self._paramhelp vh = sqrt(1 - ca**2-cb**2-cg**2 + 2*ca*cb*cg) self._paramhelp[4] = vh self._ai[0, 0] = a * vh / sa self._ai[0, 1] = a * (cg-cb*ca) / sa self._ai[0, 2] = a * cb self._ai[1, 1] = b * sa self._ai[1, 2] = b * ca self._ai[2, 2] = c self._transform = math.Transform(self._ai.T) self._setb() def _setb(self): V = self.UnitCellVolume() p = 2. * numpy.pi / V math.VecCross(p*self._ai[1, :], self._ai[2, :], out=self._bi[0, :]) math.VecCross(p*self._ai[2, :], self._ai[0, :], out=self._bi[1, :]) math.VecCross(p*self._ai[0, :], self._ai[1, :], out=self._bi[2, :]) self._qtransform = math.Transform(self._bi.T) def _set_params_from_sym(self): for i, p in enumerate(('a', 'b', 'c', 'alpha', 'beta', 'gamma')): key = sgrp_params[self.crystal_system][1][i] if isinstance(key, str): if p not in self.free_parameters: self._parameters[p] = self.free_parameters[key] @property def a(self): return self._parameters['a'] @a.setter def a(self, value): if 'a' not in self.free_parameters: raise RuntimeError("a can not be set, its not a free parameter!") self._parameters['a'] = value self.free_parameters['a'] = value self._set_params_from_sym() self._setlat() @property def b(self): return self._parameters['b'] @b.setter def b(self, value): if 'b' not in self.free_parameters: raise RuntimeError("b can not be set, its not a free parameter!") self._parameters['b'] = value self.free_parameters['b'] = value self._set_params_from_sym() self._setlat() @property def c(self): return self._parameters['c'] @c.setter def c(self, value): if 'c' not in self.free_parameters: raise RuntimeError("c can not be set, its not a free parameter!") self._parameters['c'] = value self.free_parameters['c'] = value self._set_params_from_sym() self._setlat() @property def alpha(self): return self._parameters['alpha'] @alpha.setter def alpha(self, value): if 'alpha' not in self.free_parameters: raise RuntimeError("alpha can not be set for this space group!") self._parameters['alpha'] = value self.free_parameters['alpha'] = value self._set_params_from_sym() ra = radians(value) self._paramhelp[0] = cos(ra) self._paramhelp[3] = sin(ra) self._setlat() @property def beta(self): return self._parameters['beta'] @beta.setter def beta(self, value): if 'beta' not in self.free_parameters: raise RuntimeError("beta can not be set for this space group!") self._parameters['beta'] = value self.free_parameters['beta'] = value self._set_params_from_sym() self._paramhelp[1] = 
cos(radians(value)) self._setlat() @property def gamma(self): return self._parameters['gamma'] @gamma.setter def gamma(self, value): if 'gamma' not in self.free_parameters: raise RuntimeError("gamma can not be set for this space group!") self._parameters['gamma'] = value self.free_parameters['gamma'] = value self._set_params_from_sym() self._paramhelp[2] = cos(radians(value)) self._setlat() def __eq__(self, other): """ compare another SGLattice instance to decide if both are equal. To be equal they have to use the same space group, have equal lattice parameters and contain equal atoms in their base. """ if self.space_group != other.space_group: return False # compare lattice parameters for prop in self.free_parameters: if not isclose(getattr(self, prop), getattr(other, prop)): return False # compare atoms in base for e in self._wbase: if e not in other._wbase: return False idx = other._wbase.index(e) if not WyckoffBase.entry_eq(e, other._wbase[idx]): return False return True def GetPoint(self, *args): """ determine lattice points with indices given in the argument Examples -------- >>> xu.materials.Si.lattice.GetPoint(0, 0, 4) array([ 0. , 0. , 21.72416]) or >>> xu.materials.Si.lattice.GetPoint((1, 1, 1)) array([ 5.43104, 5.43104, 5.43104]) """ if len(args) == 1: args = args[0] return self._transform(args) def GetQ(self, *args): """ determine the reciprocal lattice points with indices given in the argument """ if len(args) == 1: args = args[0] return self._qtransform(args) def GetHKL(self, *args): """ determine the Miller indices of the given reciprocal lattice points """ if len(args) == 1: args = args[0] return self._qtransform.inverse(args) def UnitCellVolume(self): """ function to calculate the unit cell volume of a lattice (angstrom^3) """ a, b, c, alpha, beta, gamma = self._parameters.values() return a * b * c * self._paramhelp[4] def ApplyStrain(self, eps): """ Applies a certain strain on a lattice. The result is a change in the base vectors. The full strain matrix (3x3) needs to be given. Note: Here you specify the strain and not the stress -> NO elastic response of the material will be considered! Note: Although the symmetry of the crystal can be lowered by this operation the spacegroup remains unchanged! The 'free_parameters' attribute is, however, updated to mimic the possible reduction of the symmetry. 
Parameters ---------- eps : array-like a 3x3 matrix with all strain components """ if isinstance(eps, (list, tuple)): eps = numpy.asarray(eps, dtype=numpy.double) if eps.shape != (3, 3): raise InputError("ApplyStrain needs a 3x3 matrix " "with strain values") ai = self._ai + numpy.dot(eps, self._ai.T).T self._parameters['a'] = math.VecNorm(ai[0, :]) self._parameters['b'] = math.VecNorm(ai[1, :]) self._parameters['c'] = math.VecNorm(ai[2, :]) self._parameters['alpha'] = math.VecAngle(ai[1, :], ai[2, :], deg=True) self._parameters['beta'] = math.VecAngle(ai[0, :], ai[2, :], deg=True) self._parameters['gamma'] = math.VecAngle(ai[0, :], ai[1, :], deg=True) # update helper parameters ra = radians(self._parameters['alpha']) self._paramhelp[0] = cos(ra) self._paramhelp[1] = cos(radians(self._parameters['beta'])) self._paramhelp[2] = cos(radians(self._parameters['gamma'])) self._paramhelp[3] = sin(ra) # set new transformations self._setlat() # update free_parameters for p, v in self.free_parameters.items(): self.free_parameters[p] = self._parameters[p] # artificially reduce symmetry if needed for i, p in enumerate(('a', 'b', 'c', 'alpha', 'beta', 'gamma')): key = sgrp_params[self.crystal_system][1][i] if isinstance(key, str): if self._parameters[p] != self.free_parameters[key]: self.free_parameters[p] = self._parameters[p] else: if self._parameters[p] != key: self.free_parameters[p] = self._parameters[p] @property def iscentrosymmetric(self): """ returns a boolean to determine if the lattice has centrosymmetry. """ if self._iscentrosymmetric is None: self._iscentrosymmetric = False for s in self.symops: if numpy.all(-numpy.identity(3) == s.D): self._iscentrosymmetric = True break return self._iscentrosymmetric def isequivalent(self, hkl1, hkl2): """ determining if hkl1 and hkl2 are two crystallographical equivalent pairs of Miller indices. Note that this function considers the effect of non-centrosymmetry! Parameters ---------- hkl1, hkl2 : list Miller indices to be checked for equivalence Returns ------- bool """ return tuple(hkl2) in self.equivalent_hkls(hkl1) def equivalent_hkls(self, hkl): """ returns a list of equivalent hkl peaks depending on the crystal system """ suf = self.space_group_suf nr = self.space_group_nr if suf == get_default_sgrp_suf(nr): ehkl = set(eqhkl_default[nr](hkl[0], hkl[1], hkl[2])) elif suf in get_possible_sgrp_suf(nr): ehkl = set(eqhkl_custom[nr](hkl[0], hkl[1], hkl[2])) else: # fallback calculation with symmetry operations ehkl = numpy.unique(numpy.einsum('...ij,j', self._hklsym, hkl), axis=0) ehkl = set(tuple(e) for e in ehkl) return ehkl def hkl_allowed(self, hkl, returnequivalents=False): """ check if Bragg reflection with Miller indices hkl can exist according to the reflection conditions. If no reflection conditions are available this function returns True for all hkl values! Parameters ---------- hkl : tuple or list Miller indices of the reflection to check returnequivalents : bool, optional If True all the equivalent Miller indices of hkl are returned in a set as second return argument. 
Returns ------- allowed : bool True if reflection can have non-zero structure factor, false otherwise equivalents : set, optional set of equivalent Miller indices if returnequivalents is True """ # generate all equivalent hkl values which also need to be checked: hkls = self.equivalent_hkls(hkl) def build_return(allowed, requi=returnequivalents): if requi: return allowed, hkls else: return allowed # load reflection conditions if needed if self._gp[2] == 'n/a': return build_return(True) if self._hklcond == [] and self._gp[2] is not None: self._hklcond = hklcond_group.findall(self._gp[2]) if self._hklcond_wp == []: for lab in set([e[1][0] for e in self._wbase]): if lab == self._gplabel: # if gen. pos. is occupied skip it self._hklcond_wp.append(None) elif wp[self.space_group][lab][2] is None: self._hklcond_wp.append(None) else: self._hklcond_wp.append(hklcond_group.findall( wp[self.space_group][lab][2])) # call C-code to (efficiently) test the conditions ret = cxrayutilities.testhklcond(hkls, self._hklcond, self._hklcond_wp) return build_return(ret) def get_allowed_hkl(self, qmax): """ return a set of all allowed reflections up to a maximal specified momentum transfer. Parameters ---------- qmax : float maximal momentum transfer Returns ------- hklset : set set of allowed hkl reflections """ def recurse_hkl(h, k, l, kstep): if (h, k, l) in hkltested: return m = self._qtransform.matrix q = m[:, 0]*h + m[:, 1]*k + m[:, 2]*l # efficient matmul if sqrt(q[0]**2 + q[1]**2 + q[2]**2) >= qmax: return else: allowed, eqhkl = self.hkl_allowed((h, k, l), returnequivalents=True) hkltested.update(eqhkl) if not self.iscentrosymmetric: hkltested.update((-h, -k, -l) for (h, k, l) in eqhkl) if allowed: hklset.update(eqhkl) if not self.iscentrosymmetric: eqhkl = self.equivalent_hkls((-h, -k, -l)) hklset.update(eqhkl) recurse_hkl(h+1, k, l, kstep) recurse_hkl(h, k+kstep, l, kstep) recurse_hkl(h, k, l+1, kstep) recurse_hkl(h, k, l-1, kstep) hklset = set() hkltested = set() q = numpy.empty(3) recurse_hkl(0, 0, 0, +1) recurse_hkl(1, -1, 0, -1) hklset.remove((0, 0, 0)) return hklset def reflection_conditions(self): """ return string of reflection conditions, both general (from space group) and of Wyckoff positions """ ostr = "Reflection conditions:\n" ostr += " general: %s\n" % str(self._gp[2]) for wplabel in set([e[1][0] for e in self._wbase]): ostr += "%8s: %s \n" % (wplabel, str(wp[self.space_group][wplabel][2])) return ostr def __str__(self): ostr = "{sg} {cs} {n}: a = {a:.4f}, b = {b:.4f} c= {c:.4f}\n" +\ "alpha = {alpha:.3f}, beta = {beta:.3f}, gamma = {gamma:.3f}\n" ostr = ostr.format(sg=self.space_group, cs=self.crystal_system, n=self.name, **self._parameters) if self._wbase: ostr += "Lattice base:\n" ostr += str(self._wbase) ostr += self.reflection_conditions() return ostr def convert_to_P1(self): """ create a P1 equivalent of this SGLattice instance. Returns ------- SGLattice instance with the same properties as the present lattice, however, in the P1 setting. """ a, b, c, alpha, beta, gamma = (self.a, self.b, self.c, self.alpha, self.beta, self.gamma) atoms = [] pos = [] occ = [] biso = [] for at, p, o, bf in self.base(): atoms.append(at) pos.append(('1a', p)) occ.append(o) biso.append(bf) return type(self)(1, a, b, c, alpha, beta, gamma, atoms=atoms, pos=pos, occ=occ, b=biso) def findsym(self): """ method to return the highest symmetry description of the current material. 
This method does not consider to change the unit cell dimensions but only searches the highest symmetry spacegroup which with the current unit cell setting can be described. It is therefore not an implementation of FINDSYM [1]. Returns ------- new SGLattice-instance a new SGLattice instance is returned with the highest available symmetry description. (see restrictions above) [1] https://stokes.byu.edu/iso/findsym.php """ def identify_wyckpos(sgrp, atoms): """ try to determine suitable Wyckoff positions Parameters ----------: sgrp : str space group identifier (including potential suffix) atoms : list list of atoms in the unit cell. should have equivalent entries as SGLattice.base() Returns ------- success, atomdict "success" is a boolean flag to indicate if equivalents for every atom position could be found upon success "atomdict" is a dictionary which can be used in a SGLattice definition to specify the unit cell. If success==False atomdict is None. """ # get all Wyckpos for this spacegroup atomdict = {'atoms': [], 'pos': [], 'occ': [], 'b': []} success = True elements = set(at[0] for at in atoms) # check all atomic species seperately for el in elements: catoms = list(filter(lambda at: at[0] == el, atoms)) found = numpy.zeros(len(catoms), dtype=bool) # see if atomic positions fit to Wyckoff positions for k, wyckpos in wp[sgrp].items(): num = int(k[:-1]) if num > len(catoms)-sum(found): break parint, poslist, reflcond = wyckpos for f, (dummy, xyz, occ, biso) in zip(found, catoms): if f: continue foundwp, pospar = testwp(parint, poslist[0], xyz) if foundwp: # generate parameters of this Wyckoff position pardict = _get_pardict(parint, pospar) # check if all equivalent positions are also # occupied by the same atom nremoved = 0 for p in poslist: pos = eval(p, {}, pardict) pos = SymOp.foldback(pos) for n, entry in enumerate(catoms): if numpy.allclose(entry[1], pos): # remove atom from search list nremoved += 1 found[n] = True if nremoved != num: # not all equivalent positions occupied return False, None else: # add Wyckoff position to output if pospar is None: atomdict['pos'].append(k) else: atomdict['pos'].append((k, list(pospar))) atomdict['atoms'].append(el) atomdict['occ'].append(occ) atomdict['b'].append(biso) if num > len(catoms)-sum(found): break if len(catoms) != sum(found): success = False atomdict = None break return success, atomdict # determine possible space groups for unit cell parameters systems = [] possible_sg = [] # determine possible lattice systems for sys in ('cubic', 'hexagonal', 'trigonal:R', 'trigonal:H', 'tetragonal', 'orthorhombic', 'monoclinic:b', 'monoclinic:c', 'triclinic'): freepar, ucpar = sgrp_params[sys] ucsys = [] for par in ucpar: if isinstance(par, numbers.Number): ucsys.append(par) else: ucsys.append(eval(par, {}, self._parameters)) if numpy.allclose(tuple(self._parameters.values()), ucsys): systems.append(sys) # determine suitable space group numbers and names for these families for sys in systems: freepar, ucpar = sgrp_params[sys] splitsys = sys.split(':') suf = None if len(splitsys) > 1: suf = ':' + splitsys[1] for sgrange, v in sgrp_sym.items(): if v[0] == splitsys[0]: for nr in reversed(sgrange): sufs = get_possible_sgrp_suf(nr) if suf is None: if isinstance(sufs, list): for s in sufs: possible_sg.append((str(nr) + s, freepar)) else: possible_sg.append((str(nr), freepar)) else: if suf in sufs: possible_sg.append((str(nr) + suf, freepar)) else: possible_sg.append((str(nr), freepar)) # test space groups starting with the highest symmetric one if 
config.VERBOSITY >= config.DEBUG: print("XU.materials.SGLattice.findsym: possible space groups: ", possible_sg) for sgrp, ucpar in possible_sg: success, atoms = identify_wyckpos(sgrp, list(self.base())) if success: break return type(self)(sgrp, *[self._parameters[k] for k in ucpar], **atoms) def transform(self, mat, origin): """ Transform the unit cell with the matrix and origin shift given in the parameters. This function returns a new instance of SGLattice which contains the highest possible symmetry description of the transformed unit cell. After the transformation (see [1]) the findsym method is used to create the new SGLattice instance. Parameters ---------- mat : (3, 3) list, or ndarray, optional transformation matrix of the unit cell. The matrix definition aims to be consistent with what is used on the Bilbao Crystallographic Server [1]. This only defines the linear part, while the origin shift is given by origin. origin : (3, ) list, or ndarray origin shift of the unit cell [1]. [1] https://www.cryst.ehu.es/cgi-bin/cryst/programs/nph-doc-trmat """ # transform unit cell dimensions ait = numpy.transpose(mat @ self._ai.T) a, b, c = [math.VecNorm(ait[i, :]) for i in range(3)] al, be, ga = [math.VecAngle(ait[(i+1) % 3, :], ait[(i+2) % 3, :], deg=True) for i in range(3)] param = (a, b, c, al, be, ga) # transform atomic positions and also search neighboring cells to find # all equivalent positions def recurse_cells(fracpos, outset): """ recursive search in neighboring cells. Note that if this function would operate on the Wyckoff position level it could use the determinant of the transformation matrix (unit cell volume change) to derive an additional abort criterion """ pos = invmat @ fracpos + origin pos = SymOp.foldback(pos) pos = tuple(numpy.round(pos, config.DIGITS)) if pos in outset: return else: outset.add(pos) g = numpy.mgrid[-1:2, -1:2, -1:2].T.reshape(27, 3).tolist() g.remove([0, 0, 0]) for off in g: recurse_cells(numpy.add(fracpos, off), outset) allatoms = {'atoms': [], 'pos': [], 'occ': [], 'b': []} invmat = numpy.linalg.inv(mat) elements = set(at[0] for at in self.base()) # check all atomic species seperately for el in elements: catoms = list(filter(lambda at: at[0] == el, self.base())) elset = set() for at in catoms: eqpos = set() recurse_cells(at[1], eqpos) for pos in eqpos: if (pos, at[2], at[3]) not in elset: elset.add((pos, at[2], at[3])) allatoms['pos'].append(('1a', pos)) allatoms['atoms'].append(el) allatoms['occ'].append(at[2]) allatoms['b'].append(at[3]) p1 = type(self)(1, *param, **allatoms) if config.VERBOSITY >= config.DEBUG: print("XU.materials.SGLattice: transform via P1: ", p1) return p1.findsym()
dkriegner/xrayutilities
lib/xrayutilities/materials/spacegrouplattice.py
Python
gpl-2.0
54,980
[ "CRYSTAL" ]
270e934258746006b37d92fd9a85229304da1e1ff0b74c6b9d132ba66f51c56f
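As a quick orientation to the class defined in the record above, here is a hedged usage sketch of the documented SGLattice constructor and query methods. The rock-salt values (space group 225, a = 5.6402 angstrom, Na on Wyckoff position 4a, Cl on 4b) are illustrative and not taken from this file, and the import assumes the package layout shown in the record path.

from xrayutilities.materials.spacegrouplattice import SGLattice

nacl = SGLattice(225, 5.6402, atoms=['Na', 'Cl'], pos=['4a', '4b'])
print(nacl.UnitCellVolume())                    # unit cell volume in angstrom^3
print(nacl.hkl_allowed((1, 1, 1)))              # True: all-odd hkl satisfies the F-centring condition
print(nacl.hkl_allowed((1, 0, 0)))              # False: mixed-parity hkl is extinct for an F lattice
print(sorted(nacl.equivalent_hkls((2, 0, 0))))  # symmetry-equivalent Miller indices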
# Copyright 2020 Tecnativa - Jairo Llopis # License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl). { "name": "Partner contact access link", "summary": "Allow to visit the full contact form from a company", "version": "14.0.1.0.0", "development_status": "Production/Stable", "category": "Tools", "website": "https://github.com/OCA/partner-contact", "author": "Tecnativa, Odoo Community Association (OCA)", "maintainers": ["Yajo"], "license": "AGPL-3", "application": False, "installable": True, "depends": ["base"], "data": ["views/res_partner_views.xml"], }
OCA/partner-contact
partner_contact_access_link/__manifest__.py
Python
agpl-3.0
618
[ "VisIt" ]
9fe15a78c9e1a4bc12377eabae64be2bba843c8d51c8417dbaefc3ebafb58954
# encoding = utf-8 class Node(object): pass class A(Node): pass class B(Node): pass class C(A, B): pass class Visitor(object): def visit(self, node, *args, **kwargs): meth = None for cls in node.__class__.__mro__: meth_name = 'visit_'+cls.__name__ meth = getattr(self, meth_name, None) if meth: break if not meth: meth = self.generic_visit return meth(node, *args, **kwargs) def generic_visit(self, node, *args, **kwargs): print('generic_visit '+node.__class__.__name__) def visit_B(self, node, *args, **kwargs): print('visit_B '+node.__class__.__name__) a = A() b = B() c = C() visitor = Visitor() visitor.visit(a) visitor.visit(b) visitor.visit(c)
JiangKlijna/design-pattern
VisitorPattern/Visitor.py
Python
apache-2.0
799
[ "VisIt" ]
fc80b1a76dd03986676a13ffa91f5efacff473b64ff83c85d1edc0d2667f7ccf
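Because visit() walks node.__class__.__mro__ in order, the example above prints 'generic_visit A', 'visit_B B' and 'visit_B C': C defines no visit_C and no visit_A handler exists, so visit_B is the first match along C's MRO. A small hedged extension (reusing the Visitor, a, b and c defined above) showing how the MRO ordering decides the winner once a visit_A handler is added:

class LoggingVisitor(Visitor):
    def visit_A(self, node, *args, **kwargs):
        print('visit_A ' + node.__class__.__name__)

lv = LoggingVisitor()
lv.visit(a)  # visit_A A
lv.visit(b)  # visit_B B (handler inherited from Visitor)
lv.visit(c)  # visit_A C, because A precedes B in C.__mro__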
""" SSH (Virtual) Computing Element For a given IP/host it will send jobs directly through ssh **Configuration Parameters** Configuration for the SSHComputingElement submission can be done via the configuration system. BatchSystem: Underlying batch system that is going to be used to orchestrate executable files. The Batch System has to be accessible from the LocalCE. By default, the LocalComputingElement submits directly on the host via the Host class. SharedArea: Area used to store executable/output/error files if they are not aready defined via BatchOutput, BatchError, InfoArea, ExecutableArea and/or WorkArea. The path should be absolute. BatchOutput: Area where the job outputs are stored. If not defined: SharedArea + '/data' is used. If not absolute: SharedArea + path is used. BatchError: Area where the job errors are stored. If not defined: SharedArea + '/data' is used. If not absolute: SharedArea + path is used. ExecutableArea: Area where the executable files are stored if necessary. If not defined: SharedArea + '/data' is used. If not absolute: SharedArea + path is used. SSHHost: SSH host name SSHUser: SSH user login SSHPassword: SSH password SSHPort: Port number if not standard, e.g. for the gsissh access SSHKey: Location of the ssh private key for no-password connection SSHOptions: Any other SSH options to be used SSHTunnel: String defining the use of intermediate SSH host. Example:: ssh -i /private/key/location -l final_user final_host SSHType: SSH (default) or gsissh **Code Documentation** """ import six import os import json import stat import shutil import errno from urllib.parse import urlparse from urllib.parse import quote from urllib.parse import unquote from shlex import quote as shlex_quote import DIRAC from DIRAC import S_OK, S_ERROR from DIRAC import gLogger from DIRAC.Resources.Computing.ComputingElement import ComputingElement from DIRAC.Resources.Computing.PilotBundle import bundleProxy, writeScript from DIRAC.Resources.Computing.BatchSystems.executeBatch import executeBatchContent from DIRAC.Core.Utilities.List import uniqueElements from DIRAC.Core.Utilities.File import makeGuid from DIRAC.Core.Utilities.List import breakListIntoChunks class SSH(object): """SSH class encapsulates passing commands and files through an SSH tunnel to a remote host. It can use either ssh or gsissh access. The final host where the commands will be executed and where the files will copied/retrieved can be reached through an intermediate host if SSHTunnel parameters is defined. SSH constructor parameters are defined in a SSH accessible Computing Element in the Configuration System: - SSHHost: SSH host name - SSHUser: SSH user login - SSHPassword: SSH password - SSHPort: port number if not standard, e.g. for the gsissh access - SSHKey: location of the ssh private key for no-password connection - SSHOptions: any other SSH options to be used - SSHTunnel: string defining the use of intermediate SSH host. 
Example: 'ssh -i /private/key/location -l final_user final_host' - SSHType: ssh ( default ) or gsissh The class public interface includes two methods: sshCall( timeout, command_sequence ) scpCall( timeout, local_file, remote_file, upload = False/True ) """ def __init__(self, host=None, parameters=None): self.host = host if parameters is None: parameters = {} if not host: self.host = parameters.get("SSHHost", "") self.user = parameters.get("SSHUser", "") self.password = parameters.get("SSHPassword", "") self.port = parameters.get("SSHPort", "") self.key = parameters.get("SSHKey", "") self.options = parameters.get("SSHOptions", "") self.sshTunnel = parameters.get("SSHTunnel", "") self.sshType = parameters.get("SSHType", "ssh") if self.port: self.options += " -p %s" % self.port if self.key: self.options += " -i %s" % self.key self.options = self.options.strip() self.log = gLogger.getSubLogger("SSH") def __ssh_call(self, command, timeout): try: import pexpect expectFlag = True except BaseException: from DIRAC.Core.Utilities.Subprocess import shellCall expectFlag = False if not timeout: timeout = 999 if expectFlag: ssh_newkey = "Are you sure you want to continue connecting" try: child = pexpect.spawn(command, timeout=timeout, encoding="utf-8") i = child.expect([pexpect.TIMEOUT, ssh_newkey, pexpect.EOF, "assword: "]) if i == 0: # Timeout return S_OK((-1, child.before, "SSH login failed")) elif i == 1: # SSH does not have the public key. Just accept it. child.sendline("yes") child.expect("assword: ") i = child.expect([pexpect.TIMEOUT, "assword: "]) if i == 0: # Timeout return S_OK((-1, str(child.before) + str(child.after), "SSH login failed")) elif i == 1: child.sendline(self.password) child.expect(pexpect.EOF) return S_OK((0, child.before, "")) elif i == 2: # Passwordless login, get the output return S_OK((0, child.before, "")) if self.password: child.sendline(self.password) child.expect(pexpect.EOF) return S_OK((0, child.before, "")) return S_ERROR((-2, child.before, "")) except Exception as x: res = (-1, "Encountered exception %s: %s" % (Exception, str(x))) return S_ERROR(res) else: # Try passwordless login result = shellCall(timeout, command) # print ( "!!! 
SSH command: %s returned %s\n" % (command, result) ) if result["Value"][0] == 255: return S_ERROR((-1, "Cannot connect to host %s" % self.host, "")) return result def sshCall(self, timeout, cmdSeq): """Execute remote command via a ssh remote call :param int timeout: timeout of the command :param cmdSeq: list of command components :type cmdSeq: python:list """ command = cmdSeq if isinstance(cmdSeq, list): command = " ".join(cmdSeq) pattern = "__DIRAC__" if self.sshTunnel: command = command.replace("'", '\\\\\\"') command = command.replace("$", "\\\\\\$") command = '/bin/sh -c \' %s -q %s -l %s %s "%s \\"echo %s; %s\\" " \' ' % ( self.sshType, self.options, self.user, self.host, self.sshTunnel, pattern, command, ) else: # command = command.replace( '$', '\$' ) command = '%s -q %s -l %s %s "echo %s; %s"' % ( self.sshType, self.options, self.user, self.host, pattern, command, ) self.log.debug("SSH command: %s" % command) result = self.__ssh_call(command, timeout) self.log.debug("SSH command result %s" % str(result)) if not result["OK"]: return result # Take the output only after the predefined pattern ind = result["Value"][1].find("__DIRAC__") if ind == -1: return result status, output, error = result["Value"] output = output[ind + 9 :] if output.startswith("\r"): output = output[1:] if output.startswith("\n"): output = output[1:] result["Value"] = (status, output, error) return result def scpCall(self, timeout, localFile, remoteFile, postUploadCommand="", upload=True): """Perform file copy through an SSH magic. :param int timeout: timeout of the command :param str localFile: local file path, serves as source for uploading and destination for downloading. Can take 'Memory' as value, in this case the downloaded contents is returned as result['Value'] :param str remoteFile: remote file full path :param str postUploadCommand: command executed on the remote side after file upload :param bool upload: upload if True, download otherwise """ # shlex_quote aims to prevent any security issue or problems with filepath containing spaces # it returns a shell-escaped version of the filename localFile = shlex_quote(localFile) remoteFile = shlex_quote(remoteFile) if upload: if self.sshTunnel: remoteFile = remoteFile.replace("$", r"\\\\\$") postUploadCommand = postUploadCommand.replace("$", r"\\\\\$") command = '/bin/sh -c \'cat %s | %s -q %s %s@%s "%s \\"cat > %s; %s\\""\' ' % ( localFile, self.sshType, self.options, self.user, self.host, self.sshTunnel, remoteFile, postUploadCommand, ) else: command = "/bin/sh -c \"cat %s | %s -q %s %s@%s 'cat > %s; %s'\" " % ( localFile, self.sshType, self.options, self.user, self.host, remoteFile, postUploadCommand, ) else: finalCat = "| cat > %s" % localFile if localFile.lower() == "memory": finalCat = "" if self.sshTunnel: remoteFile = remoteFile.replace("$", "\\\\\\$") command = '/bin/sh -c \'%s -q %s -l %s %s "%s \\"cat %s\\"" %s\'' % ( self.sshType, self.options, self.user, self.host, self.sshTunnel, remoteFile, finalCat, ) else: remoteFile = remoteFile.replace("$", r"\$") command = "/bin/sh -c '%s -q %s -l %s %s \"cat %s\" %s'" % ( self.sshType, self.options, self.user, self.host, remoteFile, finalCat, ) self.log.debug("SSH copy command: %s" % command) return self.__ssh_call(command, timeout) class SSHComputingElement(ComputingElement): ############################################################################# def __init__(self, ceUniqueID): """Standard constructor.""" super(SSHComputingElement, self).__init__(ceUniqueID) self.ceType = "SSH" self.execution = 
"SSHCE" self.submittedJobs = 0 self.outputTemplate = "" self.errorTemplate = "" ############################################################################ def setProxy(self, proxy, valid=0): """ Set and prepare proxy to use :param str proxy: proxy to use :param int valid: proxy validity period :return: S_OK/S_ERROR """ ComputingElement.setProxy(self, proxy, valid) if self.ceParameters.get("SSHType", "ssh") == "gsissh": result = self._prepareProxy() if not result["OK"]: gLogger.error("SSHComputingElement: failed to set up proxy", result["Message"]) return result return S_OK() ############################################################################# def _addCEConfigDefaults(self): """Method to make sure all necessary Configuration Parameters are defined""" # First assure that any global parameters are loaded ComputingElement._addCEConfigDefaults(self) # Now batch system specific ones if "ExecQueue" not in self.ceParameters: self.ceParameters["ExecQueue"] = self.ceParameters.get("Queue", "") if "SharedArea" not in self.ceParameters: # . isn't a good location, move to $HOME self.ceParameters["SharedArea"] = "$HOME" if "BatchOutput" not in self.ceParameters: self.ceParameters["BatchOutput"] = "data" if "BatchError" not in self.ceParameters: self.ceParameters["BatchError"] = "data" if "ExecutableArea" not in self.ceParameters: self.ceParameters["ExecutableArea"] = "data" if "InfoArea" not in self.ceParameters: self.ceParameters["InfoArea"] = "info" if "WorkArea" not in self.ceParameters: self.ceParameters["WorkArea"] = "work" def _reset(self): """Process CE parameters and make necessary adjustments""" batchSystemName = self.ceParameters.get("BatchSystem", "Host") if "BatchSystem" not in self.ceParameters: self.ceParameters["BatchSystem"] = batchSystemName result = self.loadBatchSystem(batchSystemName) if not result["OK"]: self.log.error("Failed to load the batch system plugin", batchSystemName) return result self.user = self.ceParameters["SSHUser"] self.queue = self.ceParameters["Queue"] self.submitOptions = self.ceParameters.get("SubmitOptions", "") if "ExecQueue" not in self.ceParameters or not self.ceParameters["ExecQueue"]: self.ceParameters["ExecQueue"] = self.ceParameters.get("Queue", "") self.execQueue = self.ceParameters["ExecQueue"] self.log.info("Using queue: ", self.queue) self.sharedArea = self.ceParameters["SharedArea"] self.batchOutput = self.ceParameters["BatchOutput"] if not self.batchOutput.startswith("/"): self.batchOutput = os.path.join(self.sharedArea, self.batchOutput) self.batchError = self.ceParameters["BatchError"] if not self.batchError.startswith("/"): self.batchError = os.path.join(self.sharedArea, self.batchError) self.infoArea = self.ceParameters["InfoArea"] if not self.infoArea.startswith("/"): self.infoArea = os.path.join(self.sharedArea, self.infoArea) self.executableArea = self.ceParameters["ExecutableArea"] if not self.executableArea.startswith("/"): self.executableArea = os.path.join(self.sharedArea, self.executableArea) self.workArea = self.ceParameters["WorkArea"] if not self.workArea.startswith("/"): self.workArea = os.path.join(self.sharedArea, self.workArea) self.account = self.ceParameters.get("Account", "") self.removeOutput = True if "RemoveOutput" in self.ceParameters: if self.ceParameters["RemoveOutput"].lower() in ["no", "false", "0"]: self.removeOutput = False self.preamble = self.ceParameters.get("Preamble", "") result = self._prepareRemoteHost() if not result["OK"]: return result return S_OK() def _prepareRemoteHost(self, host=None): 
"""Prepare remote directories and upload control script""" ssh = SSH(host=host, parameters=self.ceParameters) # Make remote directories dirTuple = tuple( uniqueElements( [self.sharedArea, self.executableArea, self.infoArea, self.batchOutput, self.batchError, self.workArea] ) ) nDirs = len(dirTuple) cmd = "mkdir -p %s; " * nDirs % dirTuple cmd = "bash -c '%s'" % cmd self.log.verbose("Creating working directories on %s" % self.ceParameters["SSHHost"]) result = ssh.sshCall(30, cmd) if not result["OK"]: self.log.error("Failed creating working directories", "(%s)" % result["Message"][1]) return result status, output, _error = result["Value"] if status == -1: self.log.error("Timeout while creating directories") return S_ERROR(errno.ETIME, "Timeout while creating directories") if "cannot" in output: self.log.error("Failed to create directories", "(%s)" % output) return S_ERROR(errno.EACCES, "Failed to create directories") # Upload the control script now result = self._generateControlScript() if not result["OK"]: self.log.warn("Failed generating control script") return result localScript = result["Value"] self.log.verbose( "Uploading %s script to %s" % (self.batchSystem.__class__.__name__, self.ceParameters["SSHHost"]) ) remoteScript = "%s/execute_batch" % self.sharedArea result = ssh.scpCall(30, localScript, remoteScript, postUploadCommand="chmod +x %s" % remoteScript) if not result["OK"]: self.log.warn("Failed uploading control script: %s" % result["Message"][1]) return result status, output, _error = result["Value"] if status != 0: if status == -1: self.log.warn("Timeout while uploading control script") return S_ERROR("Timeout while uploading control script") self.log.warn("Failed uploading control script: %s" % output) return S_ERROR("Failed uploading control script") # Delete the generated control script locally try: os.remove(localScript) except OSError: self.log.warn("Failed removing the generated control script locally") return S_ERROR("Failed removing the generated control script locally") # Chmod the control scripts # self.log.verbose( 'Chmod +x control script' ) # result = ssh.sshCall( 10, "chmod +x %s/%s" % ( self.sharedArea, self.controlScript ) ) # if not result['OK']: # self.log.warn( 'Failed chmod control script: %s' % result['Message'][1] ) # return result # status, output, _error = result['Value'] # if status != 0: # if status == -1: # self.log.warn( 'Timeout while chmod control script' ) # return S_ERROR( 'Timeout while chmod control script' ) # else: # self.log.warn( 'Failed uploading chmod script: %s' % output ) # return S_ERROR( 'Failed uploading chmod script' ) return S_OK() def _generateControlScript(self): """Generates a control script from a BatchSystem and a script called executeBatch :return: a path containing the script generated """ # Get the batch system module to use batchSystemDir = os.path.join(os.path.dirname(DIRAC.__file__), "Resources", "Computing", "BatchSystems") batchSystemScript = os.path.join(batchSystemDir, "%s.py" % self.batchSystem.__class__.__name__) # Get the executeBatch.py content: an str variable composed of code content that has to be extracted # The control script is generated from the batch system module and this variable controlScript = os.path.join(batchSystemDir, "control_script.py") try: shutil.copyfile(batchSystemScript, controlScript) with open(controlScript, "a") as cs: cs.write(executeBatchContent) except IOError: return S_ERROR("IO Error trying to generate control script") return S_OK("%s" % controlScript) def __executeHostCommand(self, 
command, options, ssh=None, host=None): if not ssh: ssh = SSH(host=host, parameters=self.ceParameters) options["BatchSystem"] = self.batchSystem.__class__.__name__ options["Method"] = command options["SharedDir"] = self.sharedArea options["OutputDir"] = self.batchOutput options["ErrorDir"] = self.batchError options["WorkDir"] = self.workArea options["InfoDir"] = self.infoArea options["ExecutionContext"] = self.execution options["User"] = self.user options["Queue"] = self.queue options = json.dumps(options) options = quote(options) cmd = ( "bash --login -c 'python %s/execute_batch %s || python3 %s/execute_batch %s || python2 %s/execute_batch %s'" % (self.sharedArea, options, self.sharedArea, options, self.sharedArea, options) ) self.log.verbose("CE submission command: %s" % cmd) result = ssh.sshCall(120, cmd) if not result["OK"]: self.log.error("%s CE job submission failed" % self.ceType, result["Message"]) return result sshStatus = result["Value"][0] sshStdout = result["Value"][1] sshStderr = result["Value"][2] # Examine results of the job submission if sshStatus == 0: output = sshStdout.strip().replace("\r", "").strip() try: index = output.index("============= Start output ===============") output = output[index + 42 :] except Exception: return S_ERROR("Invalid output from remote command: %s" % output) try: output = unquote(output) result = json.loads(output) if isinstance(result, six.string_types) and result.startswith("Exception:"): return S_ERROR(result) return S_OK(result) except Exception: return S_ERROR("Invalid return structure from job submission") else: return S_ERROR("\n".join([sshStdout, sshStderr])) def submitJob(self, executableFile, proxy, numberOfJobs=1): # self.log.verbose( "Executable file path: %s" % executableFile ) if not os.access(executableFile, 5): os.chmod(executableFile, stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH) # if no proxy is supplied, the executable can be submitted directly # otherwise a wrapper script is needed to get the proxy to the execution node # The wrapper script makes debugging more complicated and thus it is # recommended to transfer a proxy inside the executable if possible. 
if proxy: self.log.verbose("Setting up proxy for payload") wrapperContent = bundleProxy(executableFile, proxy) name = writeScript(wrapperContent, os.getcwd()) submitFile = name else: # no proxy submitFile = executableFile result = self._submitJobToHost(submitFile, numberOfJobs) if proxy: os.remove(submitFile) return result def _submitJobToHost(self, executableFile, numberOfJobs, host=None): """Submit prepared executable to the given host""" ssh = SSH(host=host, parameters=self.ceParameters) # Copy the executable submitFile = os.path.join(self.executableArea, os.path.basename(executableFile)) result = ssh.scpCall(30, executableFile, submitFile, postUploadCommand="chmod +x %s" % submitFile) if not result["OK"]: return result jobStamps = [] for _i in range(numberOfJobs): jobStamps.append(makeGuid()[:8]) numberOfProcessors = self.ceParameters.get("NumberOfProcessors", 1) wholeNode = self.ceParameters.get("WholeNode", False) # numberOfNodes is treated as a string as it can contain values such as "2-4" # where 2 would represent the minimum number of nodes to allocate, and 4 the maximum numberOfNodes = self.ceParameters.get("NumberOfNodes", "1") self.numberOfGPUs = self.ceParameters.get("NumberOfGPUs") # Collect command options commandOptions = { "Executable": submitFile, "NJobs": numberOfJobs, "SubmitOptions": self.submitOptions, "JobStamps": jobStamps, "WholeNode": wholeNode, "NumberOfProcessors": numberOfProcessors, "NumberOfNodes": numberOfNodes, "Preamble": self.preamble, "NumberOfGPUs": self.numberOfGPUs, "Account": self.account, } if host: commandOptions["SSHNodeHost"] = host resultCommand = self.__executeHostCommand("submitJob", commandOptions, ssh=ssh, host=host) if not resultCommand["OK"]: return resultCommand result = resultCommand["Value"] if result["Status"] != 0: return S_ERROR("Failed job submission: %s" % result["Message"]) else: batchIDs = result["Jobs"] if batchIDs: batchSystemName = self.batchSystem.__class__.__name__.lower() if host is None: jobIDs = [ "%s%s://%s/%s" % (self.ceType.lower(), batchSystemName, self.ceName, _id) for _id in batchIDs ] else: jobIDs = [ "%s%s://%s/%s/%s" % (self.ceType.lower(), batchSystemName, self.ceName, host, _id) for _id in batchIDs ] else: return S_ERROR("No jobs IDs returned") result = S_OK(jobIDs) self.submittedJobs += len(batchIDs) return result def killJob(self, jobIDList): """Kill a bunch of jobs""" if isinstance(jobIDList, six.string_types): jobIDList = [jobIDList] return self._killJobOnHost(jobIDList) def _killJobOnHost(self, jobIDList, host=None): """Kill the jobs for the given list of job IDs""" jobDict = {} for job in jobIDList: stamp = os.path.basename(urlparse(job).path) jobDict[stamp] = job stampList = list(jobDict) commandOptions = {"JobIDList": stampList, "User": self.user} resultCommand = self.__executeHostCommand("killJob", commandOptions, host=host) if not resultCommand["OK"]: return resultCommand result = resultCommand["Value"] if result["Status"] != 0: return S_ERROR("Failed job kill: %s" % result["Message"]) if result["Failed"]: return S_ERROR("%d jobs failed killing" % len(result["Failed"])) return S_OK(len(result["Successful"])) def _getHostStatus(self, host=None): """Get jobs running at a given host""" resultCommand = self.__executeHostCommand("getCEStatus", {}, host=host) if not resultCommand["OK"]: return resultCommand result = resultCommand["Value"] if result["Status"] != 0: return S_ERROR("Failed to get CE status: %s" % result["Message"]) return S_OK(result) def getCEStatus(self): """Method to return information on 
running and pending jobs.""" result = S_OK() result["SubmittedJobs"] = self.submittedJobs result["RunningJobs"] = 0 result["WaitingJobs"] = 0 resultHost = self._getHostStatus() if not resultHost["OK"]: return resultHost result["RunningJobs"] = resultHost["Value"].get("Running", 0) result["WaitingJobs"] = resultHost["Value"].get("Waiting", 0) if "AvailableCores" in resultHost["Value"]: result["AvailableCores"] = resultHost["Value"]["AvailableCores"] self.log.verbose("Waiting Jobs: ", result["WaitingJobs"]) self.log.verbose("Running Jobs: ", result["RunningJobs"]) return result def getJobStatus(self, jobIDList): """Get the status information for the given list of jobs""" return self._getJobStatusOnHost(jobIDList) def _getJobStatusOnHost(self, jobIDList, host=None): """Get the status information for the given list of jobs""" resultDict = {} jobDict = {} for job in jobIDList: stamp = os.path.basename(urlparse(job).path) jobDict[stamp] = job stampList = list(jobDict) for jobList in breakListIntoChunks(stampList, 100): resultCommand = self.__executeHostCommand("getJobStatus", {"JobIDList": jobList}, host=host) if not resultCommand["OK"]: return resultCommand result = resultCommand["Value"] if result["Status"] != 0: return S_ERROR("Failed to get job status: %s" % result["Message"]) for stamp in result["Jobs"]: resultDict[jobDict[stamp]] = result["Jobs"][stamp] return S_OK(resultDict) def _getJobOutputFiles(self, jobID, host=None): """Get output file names for the specific CE""" jobStamp = os.path.basename(urlparse(jobID).path) host = urlparse(jobID).hostname if "OutputTemplate" in self.ceParameters: self.outputTemplate = self.ceParameters["OutputTemplate"] self.errorTemplate = self.ceParameters["ErrorTemplate"] if self.outputTemplate: output = self.outputTemplate % jobStamp error = self.errorTemplate % jobStamp elif "OutputTemplate" in self.ceParameters: self.outputTemplate = self.ceParameters["OutputTemplate"] self.errorTemplate = self.ceParameters["ErrorTemplate"] output = self.outputTemplate % jobStamp error = self.errorTemplate % jobStamp elif hasattr(self.batchSystem, "getJobOutputFiles"): # numberOfNodes is treated as a string as it can contain values such as "2-4" # where 2 would represent the minimum number of nodes to allocate, and 4 the maximum numberOfNodes = self.ceParameters.get("NumberOfNodes", "1") commandOptions = { "JobIDList": [jobStamp], "OutputDir": self.batchOutput, "ErrorDir": self.batchError, "NumberOfNodes": numberOfNodes, } resultCommand = self.__executeHostCommand("getJobOutputFiles", commandOptions, host=host) if not resultCommand["OK"]: return resultCommand result = resultCommand["Value"] if result["Status"] != 0: return S_ERROR("Failed to get job output files: %s" % result["Message"]) if "OutputTemplate" in result: self.outputTemplate = result["OutputTemplate"] self.errorTemplate = result["ErrorTemplate"] output = result["Jobs"][jobStamp]["Output"] error = result["Jobs"][jobStamp]["Error"] else: output = "%s/%s.out" % (self.batchOutput, jobStamp) error = "%s/%s.err" % (self.batchError, jobStamp) return S_OK((jobStamp, host, output, error)) def getJobOutput(self, jobID, localDir=None): """Get the specified job standard output and error files. If the localDir is provided, the output is returned as file in this directory. Otherwise, the output is returned as strings. 
""" self.log.verbose("Getting output for jobID", jobID) result = self._getJobOutputFiles(jobID) if not result["OK"]: return result jobStamp, host, outputFile, errorFile = result["Value"] if localDir: localOutputFile = "%s/%s.out" % (localDir, jobStamp) localErrorFile = "%s/%s.err" % (localDir, jobStamp) else: localOutputFile = "Memory" localErrorFile = "Memory" # Take into account the SSHBatch possible SSHHost syntax host = host.split("/")[0] ssh = SSH(host=host, parameters=self.ceParameters) result = ssh.scpCall(30, localOutputFile, outputFile, upload=False) if not result["OK"]: return result result = ssh.scpCall(30, localErrorFile, errorFile, upload=False) if not result["OK"]: return result if localDir: output = localOutputFile error = localErrorFile else: output = result["Value"][1] error = result["Value"][1] return S_OK((output, error))
DIRACGrid/DIRAC
src/DIRAC/Resources/Computing/SSHComputingElement.py
Python
gpl-3.0
32,689
[ "DIRAC" ]
cb96977ca468470dff6d391509e98b7ece4b03996ae0a3cddbf18c648239fdba
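A minimal, self-contained sketch (not part of SSHComputingElement.py) of the argument and result marshalling that __executeHostCommand relies on: the options dictionary is JSON-encoded and URL-quoted so it survives the remote shell command line, and the remote control script is expected to answer with a URL-quoted JSON document printed after a fixed marker string. The option values and the simulated remote output below are made up.

import json
try:
    from urllib.parse import quote, unquote    # Python 3
except ImportError:
    from urllib import quote, unquote          # Python 2

MARKER = "============= Start output ==============="

# Encode the options as __executeHostCommand does before appending them
# to the "python .../execute_batch <options>" command line.
options = {"Method": "getCEStatus", "SharedDir": "/home/dirac/data", "User": "diracpilot"}
encoded = quote(json.dumps(options))
print("argument passed to execute_batch: " + encoded)

# What a well-behaved control script would print on the remote side
# (hypothetical result payload):
remote_stdout = "login banner text\n" + MARKER + quote(json.dumps({"Status": 0, "Running": 3}))

# Local decoding, mirroring the index / unquote / json.loads steps above.
index = remote_stdout.index(MARKER)
payload = remote_stdout[index + len(MARKER):]
print(json.loads(unquote(payload)))    # -> {'Status': 0, 'Running': 3}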
import xml.etree.ElementTree as ET from io import BytesIO import numpy as np from .http_util import DataQuery, HTTPEndPoint, parse_iso_date from .ncss_dataset import NCSSDataset def default_unit_handler(data, units=None): # pylint:disable=unused-argument r'Default unit handler, which ignores units and just returns ``numpy.array``' return np.array(data) class NCSS(HTTPEndPoint): r'''Wraps access to the NetCDF Subset Service (NCSS) on a THREDDS server. Simplifies access via HTTP to the NCSS endpoint. Parses the metadata, provides data download and parsing based on the appropriate query. Attributes ---------- metadata : ``NCSSDataset`` instance Contains the result of parsing the NCSS endpoint's dataset.xml. This has information about the time and space coverage, as well as full information about all of the variables. variables : set of strings Names of all variables available in this dataset unit_handler : callable Function to handle units that come with CSV/XML data. Should be a callable that takes a list of string values and unit str (can be ``None``), and returns the desired representation of values. Defaults to ignoring units and returning ``numpy.array``. ''' # Need staticmethod to keep this from becoming a bound method, where self # is passed implicitly unit_handler = staticmethod(default_unit_handler) def _get_metadata(self): # Need to use .content here to avoid decode problems meta_xml = self.get_path('dataset.xml').content root = ET.fromstring(meta_xml) self.metadata = NCSSDataset(root) self.variables = set(self.metadata.variables.keys()) def query(self): r'''Returns a new query for NCSS Returns ------- ``NCSSQuery`` instance ''' return NCSSQuery() def validate_query(self, query): r'''Validate a query Determines whether `query` is well-formed. This includes checking for all required parameters, as well as checking parameters for valid values. Parameters ---------- query : ``NCSSQuery`` instance Returns ------- valid : bool Whether `query` is valid. ''' # Make sure all variables are in the dataset return query.var and all(var in self.variables for var in query.var) def get_data(self, query): r'''Fetch parsed data from a THREDDS server using NCSS Requests data from the NCSS endpoint given the parameters in `query` and handles parsing of the returned content based on the mimetype. Parameters ---------- query : ``NCSSQuery`` instance The parameters to send to the NCSS endpoint Returns ------- Parsed data response from the server. Exact format depends on the format of the response. See Also -------- get_data_raw ''' resp = self.get_query(query) return response_handlers(resp, self.unit_handler) def get_data_raw(self, query): r'''Fetch raw data from a THREDDS server using NCSS Requests data from the NCSS endpoint given the parameters in `query` and returns the raw bytes of the response. Parameters ---------- query : ``NCSSQuery`` instance The parameters to send to the NCSS endpoint Returns ------- content : bytes The raw, unparsed, data returned by the server See Also -------- get_data ''' return self.get_query(query).content class NCSSQuery(DataQuery): r'''An object representing a query to the NetCDF Subset Service (NCSS). Expands on the queries supported by ``DataQuery`` to add queries specific to NCSS. ''' def projection_box(self, min_x, min_y, max_x, max_y): r'''Add a bounding box in projected (native) coordinates to the query. This adds a request for a spatial bounding box, bounded by (`min_x`, `max_x`) for x direction and (`min_y`, `max_y`) for the y direction. 
This modifies the query in-place, but returns ``self`` so that multiple queries can be chained together on one line. This replaces any existing spatial queries that have been set. Parameters ---------- min_x : float The left edge of the bounding box min_y : float The bottom edge of the bounding box max_x : float The right edge of the bounding box max_y: float The top edge of the bounding box Returns ------- self : ``NCSSQuery`` instance Returns self for chaining calls ''' self._set_query(self.spatial_query, minx=min_x, miny=min_y, maxx=max_x, maxy=max_y) return self def accept(self, fmt): r'''Set format for data returned from NCSS. This modifies the query in-place, but returns ``self`` so that multiple queries can be chained together on one line. Parameters ---------- fmt : str The format to send to the server. Returns ------- self : ``NCSSQuery`` instance Returns self for chaining calls ''' return self.add_query_parameter(accept=fmt) def add_lonlat(self, value=True): r'''Sets whether NCSS should add latitude/longitude to returned data. This is only used on grid requests. Used to make returned data CF-compliant. This modifies the query in-place, but returns ``self`` so that multiple queries can be chained together on one line. Parameters ---------- value : bool, optional Whether to add latitude/longitude information. Defaults to True. Returns ------- self : ``NCSSQuery`` instance Returns self for chaining calls ''' return self.add_query_parameter(addLatLon=value) def strides(self, time=None, spatial=None): r'''Set time and/or spatial (horizontal) strides. This is only used on grid requests. Used to skip points in the returned data. This modifies the query in-place, but returns ``self`` so that multiple queries can be chained together on one line. Parameters ---------- time : int, optional Stride for times returned. Defaults to None, which is equivalent to 1. spatial : int, optional Stride for horizontal grid. Defaults to None, which is equivalent to 1. Returns ------- self : ``NCSSQuery`` instance Returns self for chaining calls ''' if time: self.add_query_parameter(timeStride=time) if spatial: self.add_query_parameter(horizStride=spatial) return self def vertical_level(self, level): r'''Set vertical level for which data should be retrieved. The value depends on the coordinate values for the vertical dimension of the requested variable. This modifies the query in-place, but returns ``self`` so that multiple queries can be chained together on one line. Parameters ---------- level : float The value of the desired level Returns ------- self : ``NCSSQuery`` instance Returns self for chaining calls ''' return self.add_query_parameter(vertCoord=level) # # The remainder of the file is not considered part of the public API. # Use at your own risk! # class ResponseRegistry(object): r'''Allows registering of functions to be called based on the mimetype in the response headers. 
''' def __init__(self): self._reg = dict() def register(self, mimetype): def dec(func): self._reg[mimetype] = func return func return dec def default(self, content, units): # pylint:disable=unused-argument return content def __call__(self, resp, unit_handler): mimetype = resp.headers['content-type'].split(';')[0] return self._reg.get(mimetype, self.default)(resp.content, unit_handler) response_handlers = ResponseRegistry() def squish(l): r'If list contains only 1 element, return it instead' return l if len(l) > 1 else l[0] def combine_dicts(l): r'Combine a list of dictionaries into single one' ret = dict() for item in l: ret.update(item) return ret # Parsing of XML returns from NCSS @response_handlers.register('application/xml') def parse_xml(data, handle_units): root = ET.fromstring(data) return squish(parse_xml_dataset(root, handle_units)) def parse_xml_point(elem): point = dict() units = dict() for data in elem.findall('data'): name = data.get('name') unit = data.get('units') point[name] = float(data.text) if name != 'date' else parse_iso_date(data.text) if unit: units[name] = unit return point, units def combine_xml_points(l, units, handle_units): ret = dict() for item in l: for key, value in item.items(): ret.setdefault(key, []).append(value) for key, value in ret.items(): if key != 'date': ret[key] = handle_units(value, units.get(key, None)) return ret def parse_xml_dataset(elem, handle_units): points, units = zip(*[parse_xml_point(p) for p in elem.findall('point')]) # Group points by the contents of each point datasets = dict() for p in points: datasets.setdefault(tuple(p.keys()), []).append(p) all_units = combine_dicts(units) return [combine_xml_points(d, all_units, handle_units) for d in datasets.values()] # Handling of netCDF 3/4 from NCSS try: from netCDF4 import Dataset from tempfile import NamedTemporaryFile @response_handlers.register('application/x-netcdf') @response_handlers.register('application/x-netcdf4') def read_netcdf(data, handle_units): # pylint:disable=unused-argument with NamedTemporaryFile() as tmp_file: tmp_file.write(data) tmp_file.flush() return Dataset(tmp_file.name, 'r') except ImportError: import warnings warnings.warn('netCDF4 module not installed. ' 'Will be unable to handle NetCDF returns from NCSS.') # Parsing of CSV data returned from NCSS @response_handlers.register('text/plain') def parse_csv_response(data, unit_handler): return squish([parse_csv_dataset(d, unit_handler) for d in data.split(b'\n\n')]) def parse_csv_header(line): units = dict() names = [] for var in line.split(','): start = var.find('[') if start < 0: names.append(str(var)) continue else: names.append(str(var[:start])) end = var.find(']', start) unitstr = var[start + 1:end] eq = unitstr.find('=') if eq >= 0: # go past = and ", skip final " units[names[-1]] = unitstr[eq + 2:-1] return names, units def parse_csv_dataset(data, handle_units): fobj = BytesIO(data) names, units = parse_csv_header(fobj.readline().decode('utf-8')) arrs = np.genfromtxt(fobj, dtype=None, names=names, delimiter=',', unpack=True, converters={'date': lambda s: parse_iso_date(s.decode('utf-8'))}) d = dict() for f in arrs.dtype.fields: dat = arrs[f] if dat.dtype == np.object: dat = dat.tolist() d[f] = handle_units(dat, units.get(f, None)) return d
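A small usage sketch of the mimetype dispatch implemented by ResponseRegistry above. FakeResponse and the 'text/fake' handler are invented for illustration, and the imports assume this version of siphon is installed so that siphon.ncss is importable.

from collections import namedtuple
from siphon.ncss import ResponseRegistry, default_unit_handler

# Stand-in for a requests.Response: only .headers and .content are consulted.
FakeResponse = namedtuple('FakeResponse', ['headers', 'content'])

registry = ResponseRegistry()

@registry.register('text/fake')
def parse_fake(content, unit_handler):
    # A handler receives the raw response bytes plus the unit handler.
    return unit_handler(content.decode('utf-8').split(','), None)

resp = FakeResponse(headers={'content-type': 'text/fake; charset=utf-8'},
                    content=b'1,2,3')
print(registry(resp, default_unit_handler))    # dispatches on 'text/fake' -> numpy array of strings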
hyoklee/siphon
siphon/ncss.py
Python
mit
11,843
[ "NetCDF" ]
9fad9b8a3fec34eb9e41178f3c3f8cbc63560b491fd038557510bca5aeb90e0a
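A quick, made-up check of how parse_csv_header in ncss.py splits an NCSS CSV header line into column names and a units mapping; the header string is hypothetical and the import assumes siphon.ncss is importable.

from siphon.ncss import parse_csv_header

names, units = parse_csv_header('date,station,Temperature_isobaric[unit="K"]')
print(names)    # ['date', 'station', 'Temperature_isobaric']
print(units)    # {'Temperature_isobaric': 'K'}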
# ---------------------------------------------------------------------------- # Copyright (c) 2013--, scikit-bio development team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file COPYING.txt, distributed with this software. # ---------------------------------------------------------------------------- import os import inspect import warnings import nose import numpy as np import numpy.testing as npt import pandas.util.testing as pdt from skbio.util import SkbioWarning from ._decorator import experimental class ReallyEqualMixin: """Use this for testing __eq__/__ne__. Taken and modified from the following public domain code: https://ludios.org/testing-your-eq-ne-cmp/ """ def assertReallyEqual(self, a, b): # assertEqual first, because it will have a good message if the # assertion fails. self.assertEqual(a, b) self.assertEqual(b, a) self.assertTrue(a == b) self.assertTrue(b == a) self.assertFalse(a != b) self.assertFalse(b != a) def assertReallyNotEqual(self, a, b): # assertNotEqual first, because it will have a good message if the # assertion fails. self.assertNotEqual(a, b) self.assertNotEqual(b, a) self.assertFalse(a == b) self.assertFalse(b == a) self.assertTrue(a != b) self.assertTrue(b != a) @nose.tools.nottest class SuppressSkbioWarnings(nose.plugins.Plugin): def configure(self, options, conf): super(SuppressSkbioWarnings, self).configure(options, conf) self.enabled = True def beforeTest(self, test): warnings.simplefilter("ignore", category=SkbioWarning) def afterTest(self, test): warnings.resetwarnings() @nose.tools.nottest class TestRunner: """Simple wrapper class around nosetests functionality. Parameters ---------- filename : str __file__ attribute passed in from the caller. This tells the tester where to start looking for tests. Notes ----- The primary purpose of this class is to create an interface which users of scikit-bio can use to run all of the built in tests. Normally this would be done by invoking nosetests directly from the command line, but scikit-bio needs several additional options which make the command long and ugly. This class invokes nose with the required options. """ @experimental(as_of="0.4.0") def __init__(self, filename): self._filename = filename self._test_dir = os.path.dirname(filename) @experimental(as_of="0.4.0") def test(self, verbose=False): """Performs the actual running of the tests. Parameters ---------- verbose : bool flag for running in verbose mode. Returns ------- bool test run success status """ # NOTE: it doesn't seem to matter what the first element of the argv # list is, there just needs to be something there. argv = [self._filename, '-I DO_NOT_IGNORE_ANYTHING', '--with-doctest', '--doctest-tests'] if verbose: argv.append('-v') return nose.core.run(argv=argv, defaultTest=self._test_dir, addplugins=[SuppressSkbioWarnings()]) @experimental(as_of="0.4.0") def get_data_path(fn, subfolder='data'): """Return path to filename ``fn`` in the data folder. During testing it is often necessary to load data files. This function returns the full path to files in the ``data`` subfolder by default. Parameters ---------- fn : str File name. subfolder : str, defaults to ``data`` Name of the subfolder that contains the data. Returns ------- str Inferred absolute path to the test data for the module where ``get_data_path(fn)`` is called. Notes ----- The requested path may not point to an existing file, as its existence is not checked. 
""" # getouterframes returns a list of tuples: the second tuple # contains info about the caller, and the second element is its # filename callers_filename = inspect.getouterframes(inspect.currentframe())[1][1] path = os.path.dirname(os.path.abspath(callers_filename)) data_path = os.path.join(path, subfolder, fn) return data_path @experimental(as_of="0.4.0") def assert_ordination_results_equal(left, right, ignore_method_names=False, ignore_axis_labels=False, ignore_directionality=False, decimal=7): """Assert that ordination results objects are equal. This is a helper function intended to be used in unit tests that need to compare ``OrdinationResults`` objects. Parameters ---------- left, right : OrdinationResults Ordination results to be compared for equality. ignore_method_names : bool, optional Ignore differences in `short_method_name` and `long_method_name`. ignore_axis_labels : bool, optional Ignore differences in axis labels (i.e., column labels). ignore_directionality : bool, optional Ignore differences in directionality (i.e., differences in signs) for attributes `samples`, `features` and `biplot_scores`. Raises ------ AssertionError If the two objects are not equal. """ npt.assert_equal(type(left) is type(right), True) if not ignore_method_names: npt.assert_equal(left.short_method_name, right.short_method_name) npt.assert_equal(left.long_method_name, right.long_method_name) _assert_frame_equal(left.samples, right.samples, ignore_columns=ignore_axis_labels, ignore_directionality=ignore_directionality, decimal=decimal) _assert_frame_equal(left.features, right.features, ignore_columns=ignore_axis_labels, ignore_directionality=ignore_directionality, decimal=decimal) _assert_frame_equal(left.biplot_scores, right.biplot_scores, ignore_columns=ignore_axis_labels, ignore_directionality=ignore_directionality, decimal=decimal) _assert_frame_equal(left.sample_constraints, right.sample_constraints, ignore_columns=ignore_axis_labels, ignore_directionality=ignore_directionality, decimal=decimal) _assert_series_equal(left.eigvals, right.eigvals, ignore_axis_labels, decimal=decimal) _assert_series_equal(left.proportion_explained, right.proportion_explained, ignore_axis_labels, decimal=decimal) def _assert_series_equal(left_s, right_s, ignore_index=False, decimal=7): # assert_series_equal doesn't like None... if left_s is None or right_s is None: assert left_s is None and right_s is None else: npt.assert_almost_equal(left_s.values, right_s.values, decimal=decimal) if not ignore_index: pdt.assert_index_equal(left_s.index, right_s.index) def _assert_frame_equal(left_df, right_df, ignore_index=False, ignore_columns=False, ignore_directionality=False, decimal=7): # assert_frame_equal doesn't like None... if left_df is None or right_df is None: assert left_df is None and right_df is None else: left_values = left_df.values right_values = right_df.values if ignore_directionality: left_values, right_values = _normalize_signs(left_values, right_values) npt.assert_almost_equal(left_values, right_values, decimal=decimal) if not ignore_index: pdt.assert_index_equal(left_df.index, right_df.index) if not ignore_columns: pdt.assert_index_equal(left_df.columns, right_df.columns) def _normalize_signs(arr1, arr2): """Change column signs so that "column" and "-column" compare equal. This is needed because results of eigenproblmes can have signs flipped, but they're still right. 
Notes ===== This function tries hard to make sure that, if you find "column" and "-column" almost equal, calling a function like np.allclose to compare them after calling `normalize_signs` succeeds. To do so, it distinguishes two cases for every column: - It can be all almost equal to 0 (this includes a column of zeros). - Otherwise, it has a value that isn't close to 0. In the first case, no sign needs to be flipped. I.e., for |epsilon| small, np.allclose(-epsilon, 0) is true if and only if np.allclose(epsilon, 0) is. In the second case, the function finds the number in the column whose absolute value is largest. Then, it compares its sign with the number found in the same index, but in the other array, and flips the sign of the column as needed. """ # Let's convert everyting to floating point numbers (it's # reasonable to assume that eigenvectors will already be floating # point numbers). This is necessary because np.array(1) / # np.array(0) != np.array(1.) / np.array(0.) arr1 = np.asarray(arr1, dtype=np.float64) arr2 = np.asarray(arr2, dtype=np.float64) if arr1.shape != arr2.shape: raise ValueError( "Arrays must have the same shape ({0} vs {1}).".format(arr1.shape, arr2.shape) ) # To avoid issues around zero, we'll compare signs of the values # with highest absolute value max_idx = np.abs(arr1).argmax(axis=0) max_arr1 = arr1[max_idx, range(arr1.shape[1])] max_arr2 = arr2[max_idx, range(arr2.shape[1])] sign_arr1 = np.sign(max_arr1) sign_arr2 = np.sign(max_arr2) # Store current warnings, and ignore division by zero (like 1. / # 0.) and invalid operations (like 0. / 0.) wrn = np.seterr(invalid='ignore', divide='ignore') differences = sign_arr1 / sign_arr2 # The values in `differences` can be: # 1 -> equal signs # -1 -> diff signs # Or nan (0/0), inf (nonzero/0), 0 (0/nonzero) np.seterr(**wrn) # Now let's deal with cases where `differences != \pm 1` special_cases = (~np.isfinite(differences)) | (differences == 0) # In any of these cases, the sign of the column doesn't matter, so # let's just keep it differences[special_cases] = 1 return arr1 * differences, arr2 @experimental(as_of="0.4.0") def assert_data_frame_almost_equal(left, right): """Raise AssertionError if ``pd.DataFrame`` objects are not "almost equal". Wrapper of ``pd.util.testing.assert_frame_equal``. Floating point values are considered "almost equal" if they are within a threshold defined by ``assert_frame_equal``. This wrapper uses a number of checks that are turned off by default in ``assert_frame_equal`` in order to perform stricter comparisons (for example, ensuring the index and column types are the same). It also does not consider empty ``pd.DataFrame`` objects equal if they have a different index. Other notes: * Index (row) and column ordering must be the same for objects to be equal. * NaNs (``np.nan``) in the same locations are considered equal. This is a helper function intended to be used in unit tests that need to compare ``pd.DataFrame`` objects. Parameters ---------- left, right : pd.DataFrame ``pd.DataFrame`` objects to compare. Raises ------ AssertionError If `left` and `right` are not "almost equal". 
See Also -------- pandas.util.testing.assert_frame_equal """ # pass all kwargs to ensure this function has consistent behavior even if # `assert_frame_equal`'s defaults change pdt.assert_frame_equal(left, right, check_dtype=True, check_index_type=True, check_column_type=True, check_frame_type=True, check_less_precise=False, check_names=True, by_blocks=False, check_exact=False) # this check ensures that empty DataFrames with different indices do not # compare equal. exact=True specifies that the type of the indices must be # exactly the same assert_index_equal(left.index, right.index) def assert_series_almost_equal(left, right): # pass all kwargs to ensure this function has consistent behavior even if # `assert_series_equal`'s defaults change pdt.assert_series_equal(left, right, check_dtype=True, check_index_type=True, check_series_type=True, check_less_precise=False, check_names=True, check_exact=False, check_datetimelike_compat=False, obj='Series') # this check ensures that empty Series with different indices do not # compare equal. assert_index_equal(left.index, right.index) def assert_index_equal(a, b): pdt.assert_index_equal(a, b, exact=True, check_names=True, check_exact=True)
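An illustrative use of the sign normalization performed by _normalize_signs in _testing.py: eigenvector-like columns that differ only by a sign flip compare equal once both arrays have been normalized. The arrays are made up, and the import reaches into the private skbio.util._testing module.

import numpy as np
from skbio.util._testing import _normalize_signs

a = np.array([[1., -2.],
              [3., 4.]])
b = np.array([[-1., -2.],    # first column has its signs flipped
              [-3., 4.]])

a_norm, b_norm = _normalize_signs(a, b)
print(np.allclose(a, b))              # False: raw arrays differ
print(np.allclose(a_norm, b_norm))    # True: the sign flip has been undone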
kdmurray91/scikit-bio
skbio/util/_testing.py
Python
bsd-3-clause
13,803
[ "scikit-bio" ]
7e34a3e883591eaee2aa6a4e843ec1b9e2a533651ee04d2343c5855e44cf8859
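A hypothetical test case showing how ReallyEqualMixin from _testing.py is meant to be combined with unittest.TestCase to exercise __eq__ and __ne__ in both directions; the Point class is invented for the example.

import unittest
from skbio.util._testing import ReallyEqualMixin

class Point(object):
    """Toy value object with symmetric __eq__/__ne__."""
    def __init__(self, x):
        self.x = x

    def __eq__(self, other):
        return isinstance(other, Point) and self.x == other.x

    def __ne__(self, other):
        return not self == other

class TestPointEquality(unittest.TestCase, ReallyEqualMixin):
    def test_equality_both_directions(self):
        self.assertReallyEqual(Point(1), Point(1))
        self.assertReallyNotEqual(Point(1), Point(2))

if __name__ == '__main__':
    unittest.main()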
# pylint: disable=arguments-differ """ Models for the shopping cart and assorted purchase types """ from collections import namedtuple from datetime import datetime from datetime import timedelta from decimal import Decimal import json import analytics from io import BytesIO from django.db.models import Q, F import pytz import logging import smtplib import StringIO import csv from boto.exception import BotoServerError # this is a super-class of SESError and catches connection errors from django.dispatch import receiver from django.db import models from django.conf import settings from django.core.exceptions import ObjectDoesNotExist from django.core.mail import send_mail from django.contrib.auth.models import User from django.utils.translation import ugettext as _, ugettext_lazy from django.db import transaction from django.db.models import Sum, Count from django.db.models.signals import post_save, post_delete from django.core.urlresolvers import reverse from model_utils.managers import InheritanceManager from model_utils.models import TimeStampedModel from django.core.mail.message import EmailMessage from xmodule.modulestore.django import modulestore from eventtracking import tracker from openedx.core.djangoapps.theming.helpers import get_value as get_themed_value from courseware.courses import get_course_by_id from config_models.models import ConfigurationModel from course_modes.models import CourseMode from edxmako.shortcuts import render_to_string from student.models import CourseEnrollment, UNENROLL_DONE from util.query import use_read_replica_if_available from xmodule_django.models import CourseKeyField from .exceptions import ( InvalidCartItem, PurchasedCallbackException, ItemAlreadyInCartException, AlreadyEnrolledInCourseException, CourseDoesNotExistException, MultipleCouponsNotAllowedException, InvalidStatusToRetire, UnexpectedOrderItemStatus, ItemNotFoundInCartException ) from microsite_configuration import microsite from shoppingcart.pdf import PDFInvoice log = logging.getLogger("shoppingcart") ORDER_STATUSES = ( # The user is selecting what he/she wants to purchase. ('cart', 'cart'), # The user has been sent to the external payment processor. # At this point, the order should NOT be modified. # If the user returns to the payment flow, he/she will start a new order. ('paying', 'paying'), # The user has successfully purchased the items in the order. ('purchased', 'purchased'), # The user's order has been refunded. ('refunded', 'refunded'), # The user's order went through, but the order was erroneously left # in 'cart'. ('defunct-cart', 'defunct-cart'), # The user's order went through, but the order was erroneously left # in 'paying'. ('defunct-paying', 'defunct-paying'), ) # maps order statuses to their defunct states ORDER_STATUS_MAP = { 'cart': 'defunct-cart', 'paying': 'defunct-paying', } # we need a tuple to represent the primary key of various OrderItem subclasses OrderItemSubclassPK = namedtuple('OrderItemSubclassPK', ['cls', 'pk']) class OrderTypes(object): """ This class specify purchase OrderTypes. """ PERSONAL = 'personal' BUSINESS = 'business' ORDER_TYPES = ( (PERSONAL, 'personal'), (BUSINESS, 'business'), ) class Order(models.Model): """ This is the model for an order. Before purchase, an Order and its related OrderItems are used as the shopping cart. FOR ANY USER, THERE SHOULD ONLY EVER BE ZERO OR ONE ORDER WITH STATUS='cart'. 
""" class Meta(object): app_label = "shoppingcart" user = models.ForeignKey(User, db_index=True) currency = models.CharField(default="usd", max_length=8) # lower case ISO currency codes status = models.CharField(max_length=32, default='cart', choices=ORDER_STATUSES) purchase_time = models.DateTimeField(null=True, blank=True) refunded_time = models.DateTimeField(null=True, blank=True) # Now we store data needed to generate a reasonable receipt # These fields only make sense after the purchase bill_to_first = models.CharField(max_length=64, blank=True) bill_to_last = models.CharField(max_length=64, blank=True) bill_to_street1 = models.CharField(max_length=128, blank=True) bill_to_street2 = models.CharField(max_length=128, blank=True) bill_to_city = models.CharField(max_length=64, blank=True) bill_to_state = models.CharField(max_length=8, blank=True) bill_to_postalcode = models.CharField(max_length=16, blank=True) bill_to_country = models.CharField(max_length=64, blank=True) bill_to_ccnum = models.CharField(max_length=8, blank=True) # last 4 digits bill_to_cardtype = models.CharField(max_length=32, blank=True) # a JSON dump of the CC processor response, for completeness processor_reply_dump = models.TextField(blank=True) # bulk purchase registration code workflow billing details company_name = models.CharField(max_length=255, null=True, blank=True) company_contact_name = models.CharField(max_length=255, null=True, blank=True) company_contact_email = models.CharField(max_length=255, null=True, blank=True) recipient_name = models.CharField(max_length=255, null=True, blank=True) recipient_email = models.CharField(max_length=255, null=True, blank=True) customer_reference_number = models.CharField(max_length=63, null=True, blank=True) order_type = models.CharField(max_length=32, default='personal', choices=OrderTypes.ORDER_TYPES) @classmethod def get_cart_for_user(cls, user): """ Always use this to preserve the property that at most 1 order per user has status = 'cart' """ # find the newest element in the db try: cart_order = cls.objects.filter(user=user, status='cart').order_by('-id')[:1].get() except ObjectDoesNotExist: # if nothing exists in the database, create a new cart cart_order, _created = cls.objects.get_or_create(user=user, status='cart') return cart_order @classmethod def does_user_have_cart(cls, user): """ Returns a boolean whether a shopping cart (Order) exists for the specified user """ return cls.objects.filter(user=user, status='cart').exists() @classmethod def user_cart_has_items(cls, user, item_types=None): """ Returns true if the user (anonymous user ok) has a cart with items in it. (Which means it should be displayed. If a item_type is passed in, then we check to see if the cart has at least one of those types of OrderItems """ if not user.is_authenticated(): return False cart = cls.get_cart_for_user(user) if not item_types: # check to see if the cart has at least some item in it return cart.has_items() else: # if the caller is explicitly asking to check for particular types for item_type in item_types: if cart.has_items(item_type): return True return False @classmethod def remove_cart_item_from_order(cls, item, user): """ Removes the item from the cart if the item.order.status == 'cart'. 
Also removes any code redemption associated with the order_item """ if item.order.status == 'cart': log.info("order item %s removed for user %s", str(item.id), user) item.delete() # remove any redemption entry associated with the item CouponRedemption.remove_code_redemption_from_item(item, user) @property def total_cost(self): """ Return the total cost of the cart. If the order has been purchased, returns total of all purchased and not refunded items. """ return sum(i.line_cost for i in self.orderitem_set.filter(status=self.status)) def has_items(self, item_type=None): """ Does the cart have any items in it? If an item_type is passed in then we check to see if there are any items of that class type """ if not item_type: return self.orderitem_set.exists() else: items = self.orderitem_set.all().select_subclasses() for item in items: if isinstance(item, item_type): return True return False def reset_cart_items_prices(self): """ Reset the items price state in the user cart """ for item in self.orderitem_set.all(): if item.is_discounted: item.unit_cost = item.list_price item.save() def clear(self): """ Clear out all the items in the cart """ self.orderitem_set.all().delete() @transaction.atomic def start_purchase(self): """ Start the purchase process. This will set the order status to "paying", at which point it should no longer be modified. Future calls to `Order.get_cart_for_user()` will filter out orders with status "paying", effectively creating a new (empty) cart. """ if self.status == 'cart': self.status = 'paying' self.save() for item in OrderItem.objects.filter(order=self).select_subclasses(): item.start_purchase() def update_order_type(self): """ updating order type. This method wil inspect the quantity associated with the OrderItem. In the application, it is implied that when qty > 1, then the user is to purchase 'RegistrationCodes' which are randomly generated strings that users can distribute to others in order for them to enroll in paywalled courses. 
The UI/UX may change in the future to make the switching between PaidCourseRegistration and CourseRegCodeItems a more explicit UI gesture from the purchaser """ cart_items = self.orderitem_set.all() is_order_type_business = False for cart_item in cart_items: if cart_item.qty > 1: is_order_type_business = True items_to_delete = [] old_to_new_id_map = [] if is_order_type_business: for cart_item in cart_items: if hasattr(cart_item, 'paidcourseregistration'): course_reg_code_item = CourseRegCodeItem.add_to_order(self, cart_item.paidcourseregistration.course_id, cart_item.qty) # update the discounted prices if coupon redemption applied course_reg_code_item.list_price = cart_item.list_price course_reg_code_item.unit_cost = cart_item.unit_cost course_reg_code_item.save() items_to_delete.append(cart_item) old_to_new_id_map.append({"oldId": cart_item.id, "newId": course_reg_code_item.id}) else: for cart_item in cart_items: if hasattr(cart_item, 'courseregcodeitem'): paid_course_registration = PaidCourseRegistration.add_to_order(self, cart_item.courseregcodeitem.course_id) # update the discounted prices if coupon redemption applied paid_course_registration.list_price = cart_item.list_price paid_course_registration.unit_cost = cart_item.unit_cost paid_course_registration.save() items_to_delete.append(cart_item) old_to_new_id_map.append({"oldId": cart_item.id, "newId": paid_course_registration.id}) for item in items_to_delete: item.delete() self.order_type = OrderTypes.BUSINESS if is_order_type_business else OrderTypes.PERSONAL self.save() return old_to_new_id_map def generate_pdf_receipt(self, order_items): """ Generates the pdf receipt for the given order_items and returns the pdf_buffer. """ items_data = [] for item in order_items: item_total = item.qty * item.unit_cost items_data.append({ 'item_description': item.pdf_receipt_display_name, 'quantity': item.qty, 'list_price': item.get_list_price(), 'discount': item.get_list_price() - item.unit_cost, 'item_total': item_total }) pdf_buffer = BytesIO() PDFInvoice( items_data=items_data, item_id=str(self.id), date=self.purchase_time, is_invoice=False, total_cost=self.total_cost, payment_received=self.total_cost, balance=0 ).generate_pdf(pdf_buffer) return pdf_buffer def generate_registration_codes_csv(self, orderitems, site_name): """ this function generates the csv file """ course_info = [] csv_file = StringIO.StringIO() csv_writer = csv.writer(csv_file) csv_writer.writerow(['Course Name', 'Registration Code', 'URL']) for item in orderitems: course_id = item.course_id course = get_course_by_id(item.course_id, depth=0) registration_codes = CourseRegistrationCode.objects.filter(course_id=course_id, order=self) course_info.append((course.display_name, ' (' + course.start_datetime_text() + '-' + course.end_datetime_text() + ')')) for registration_code in registration_codes: redemption_url = reverse('register_code_redemption', args=[registration_code.code]) url = '{base_url}{redemption_url}'.format(base_url=site_name, redemption_url=redemption_url) csv_writer.writerow([unicode(course.display_name).encode("utf-8"), registration_code.code, url]) return csv_file, course_info def send_confirmation_emails(self, orderitems, is_order_type_business, csv_file, pdf_file, site_name, courses_info): """ send confirmation e-mail """ recipient_list = [(self.user.username, self.user.email, 'user')] # pylint: disable=no-member if self.company_contact_email: recipient_list.append((self.company_contact_name, self.company_contact_email, 'company_contact')) joined_course_names 
= "" if self.recipient_email: recipient_list.append((self.recipient_name, self.recipient_email, 'email_recipient')) courses_names_with_dates = [course_info[0] + course_info[1] for course_info in courses_info] joined_course_names = " " + ", ".join(courses_names_with_dates) if not is_order_type_business: subject = _("Order Payment Confirmation") else: subject = _('Confirmation and Registration Codes for the following courses: {course_name_list}').format( course_name_list=joined_course_names ) dashboard_url = '{base_url}{dashboard}'.format( base_url=site_name, dashboard=reverse('dashboard') ) try: from_address = microsite.get_value( 'email_from_address', settings.PAYMENT_SUPPORT_EMAIL ) # Send a unique email for each recipient. Don't put all email addresses in a single email. for recipient in recipient_list: message = render_to_string( 'emails/business_order_confirmation_email.txt' if is_order_type_business else 'emails/order_confirmation_email.txt', { 'order': self, 'recipient_name': recipient[0], 'recipient_type': recipient[2], 'site_name': site_name, 'order_items': orderitems, 'course_names': ", ".join([course_info[0] for course_info in courses_info]), 'dashboard_url': dashboard_url, 'currency_symbol': settings.PAID_COURSE_REGISTRATION_CURRENCY[1], 'order_placed_by': '{username} ({email})'.format( username=self.user.username, email=self.user.email ), 'has_billing_info': settings.FEATURES['STORE_BILLING_INFO'], 'platform_name': microsite.get_value('platform_name', settings.PLATFORM_NAME), 'payment_support_email': microsite.get_value('payment_support_email', settings.PAYMENT_SUPPORT_EMAIL), 'payment_email_signature': microsite.get_value('payment_email_signature'), } ) email = EmailMessage( subject=subject, body=message, from_email=from_address, to=[recipient[1]] ) # Only the business order is HTML formatted. A single seat order confirmation is plain text. if is_order_type_business: email.content_subtype = "html" if csv_file: email.attach(u'RegistrationCodesRedemptionUrls.csv', csv_file.getvalue(), 'text/csv') if pdf_file is not None: email.attach(u'Receipt.pdf', pdf_file.getvalue(), 'application/pdf') else: file_buffer = StringIO.StringIO(_('pdf download unavailable right now, please contact support.')) email.attach(u'pdf_not_available.txt', file_buffer.getvalue(), 'text/plain') email.send() except (smtplib.SMTPException, BotoServerError): # sadly need to handle diff. mail backends individually log.error('Failed sending confirmation e-mail for order %d', self.id) def purchase(self, first='', last='', street1='', street2='', city='', state='', postalcode='', country='', ccnum='', cardtype='', processor_reply_dump=''): """ Call to mark this order as purchased. Iterates through its OrderItems and calls their purchased_callback `first` - first name of person billed (e.g. John) `last` - last name of person billed (e.g. Smith) `street1` - first line of a street address of the billing address (e.g. 11 Cambridge Center) `street2` - second line of a street address of the billing address (e.g. Suite 101) `city` - city of the billing address (e.g. Cambridge) `state` - code of the state, province, or territory of the billing address (e.g. MA) `postalcode` - postal code of the billing address (e.g. 02142) `country` - country code of the billing address (e.g. US) `ccnum` - last 4 digits of the credit card number of the credit card billed (e.g. 1111) `cardtype` - 3-digit code representing the card type used (e.g. 
001) `processor_reply_dump` - all the parameters returned by the processor """ if self.status == 'purchased': log.error( u"`purchase` method called on order {}, but order is already purchased.".format(self.id) # pylint: disable=no-member ) return self.status = 'purchased' self.purchase_time = datetime.now(pytz.utc) self.bill_to_first = first self.bill_to_last = last self.bill_to_city = city self.bill_to_state = state self.bill_to_country = country self.bill_to_postalcode = postalcode if settings.FEATURES['STORE_BILLING_INFO']: self.bill_to_street1 = street1 self.bill_to_street2 = street2 self.bill_to_ccnum = ccnum self.bill_to_cardtype = cardtype self.processor_reply_dump = processor_reply_dump # save these changes on the order, then we can tell when we are in an # inconsistent state self.save() # this should return all of the objects with the correct types of the # subclasses orderitems = OrderItem.objects.filter(order=self).select_subclasses() site_name = microsite.get_value('SITE_NAME', settings.SITE_NAME) if self.order_type == OrderTypes.BUSINESS: self.update_order_type() for item in orderitems: item.purchase_item() csv_file = None courses_info = [] if self.order_type == OrderTypes.BUSINESS: # # Generate the CSV file that contains all of the RegistrationCodes that have already been # generated when the purchase has transacted # csv_file, courses_info = self.generate_registration_codes_csv(orderitems, site_name) try: pdf_file = self.generate_pdf_receipt(orderitems) except Exception: # pylint: disable=broad-except log.exception('Exception at creating pdf file.') pdf_file = None try: self.send_confirmation_emails( orderitems, self.order_type == OrderTypes.BUSINESS, csv_file, pdf_file, site_name, courses_info ) except Exception: # pylint: disable=broad-except # Catch all exceptions here, since the Django view implicitly # wraps this in a transaction. If the order completes successfully, # we don't want to roll back just because we couldn't send # the confirmation email. log.exception('Error occurred while sending payment confirmation email') self._emit_order_event('Completed Order', orderitems) def refund(self): """ Refund the given order. As of right now, this just marks the order as refunded. """ self.status = 'refunded' self.save() orderitems = OrderItem.objects.filter(order=self).select_subclasses() self._emit_order_event('Refunded Order', orderitems) def _emit_order_event(self, event_name, orderitems): """ Emit an analytics event with the given name for this Order. Will iterate over all associated OrderItems and add them as products in the event as well. """ try: if settings.LMS_SEGMENT_KEY: tracking_context = tracker.get_tracker().resolve_context() analytics.track(self.user.id, event_name, { 'orderId': self.id, 'total': str(self.total_cost), 'currency': self.currency, 'products': [item.analytics_data() for item in orderitems] }, context={ 'ip': tracking_context.get('ip'), 'Google Analytics': { 'clientId': tracking_context.get('client_id') } }) except Exception: # pylint: disable=broad-except # Capturing all exceptions thrown while tracking analytics events. We do not want # an operation to fail because of an analytics event, so we will capture these # errors in the logs. 
log.exception( u'Unable to emit {event} event for user {user} and order {order}'.format( event=event_name, user=self.user.id, order=self.id) ) def add_billing_details(self, company_name='', company_contact_name='', company_contact_email='', recipient_name='', recipient_email='', customer_reference_number=''): """ This function is called after the user selects a purchase type of "Business" and is asked to enter the optional billing details. The billing details are updated for that order. company_name - Name of purchasing organization company_contact_name - Name of the key contact at the company the sale was made to company_contact_email - Email of the key contact at the company the sale was made to recipient_name - Name of the company should the invoice be sent to recipient_email - Email of the company should the invoice be sent to customer_reference_number - purchase order number of the organization associated with this Order """ self.company_name = company_name self.company_contact_name = company_contact_name self.company_contact_email = company_contact_email self.recipient_name = recipient_name self.recipient_email = recipient_email self.customer_reference_number = customer_reference_number self.save() def generate_receipt_instructions(self): """ Call to generate specific instructions for each item in the order. This gets displayed on the receipt page, typically. Instructions are something like "visit your dashboard to see your new courses". This will return two things in a pair. The first will be a dict with keys=OrderItemSubclassPK corresponding to an OrderItem and values=a set of html instructions they generate. The second will be a set of de-duped html instructions """ instruction_set = set([]) # heh. not ia32 or alpha or sparc instruction_dict = {} order_items = OrderItem.objects.filter(order=self).select_subclasses() for item in order_items: item_pk_with_subclass, set_of_html = item.generate_receipt_instructions() instruction_dict[item_pk_with_subclass] = set_of_html instruction_set.update(set_of_html) return instruction_dict, instruction_set def retire(self): """ Method to "retire" orders that have gone through to the payment service but have (erroneously) not had their statuses updated. 
This method only works on orders that satisfy the following conditions: 1) the order status is either "cart" or "paying" (otherwise we raise an InvalidStatusToRetire error) 2) the order's order item's statuses match the order's status (otherwise we throw an UnexpectedOrderItemStatus error) """ # if an order is already retired, no-op: if self.status in ORDER_STATUS_MAP.values(): return if self.status not in ORDER_STATUS_MAP.keys(): raise InvalidStatusToRetire( "order status {order_status} is not 'paying' or 'cart'".format( order_status=self.status ) ) for item in self.orderitem_set.all(): if item.status != self.status: raise UnexpectedOrderItemStatus( "order_item status is different from order status" ) self.status = ORDER_STATUS_MAP[self.status] self.save() for item in self.orderitem_set.all(): item.retire() def find_item_by_course_id(self, course_id): """ course_id: Course id of the item to find Returns OrderItem from the Order given a course_id Raises exception ItemNotFoundException when the item having the given course_id is not present in the cart """ cart_items = OrderItem.objects.filter(order=self).select_subclasses() found_items = [] for item in cart_items: if getattr(item, 'course_id', None): if item.course_id == course_id: found_items.append(item) if not found_items: raise ItemNotFoundInCartException return found_items class OrderItem(TimeStampedModel): """ This is the basic interface for order items. Order items are line items that fill up the shopping carts and orders. Each implementation of OrderItem should provide its own purchased_callback as a method. """ class Meta(object): app_label = "shoppingcart" objects = InheritanceManager() order = models.ForeignKey(Order, db_index=True) # this is denormalized, but convenient for SQL queries for reports, etc. user should always be = order.user user = models.ForeignKey(User, db_index=True) # this is denormalized, but convenient for SQL queries for reports, etc. status should always be = order.status status = models.CharField(max_length=32, default='cart', choices=ORDER_STATUSES, db_index=True) qty = models.IntegerField(default=1) unit_cost = models.DecimalField(default=0.0, decimal_places=2, max_digits=30) list_price = models.DecimalField(decimal_places=2, max_digits=30, null=True) line_desc = models.CharField(default="Misc. Item", max_length=1024) currency = models.CharField(default="usd", max_length=8) # lower case ISO currency codes fulfilled_time = models.DateTimeField(null=True, db_index=True) refund_requested_time = models.DateTimeField(null=True, db_index=True) service_fee = models.DecimalField(default=0.0, decimal_places=2, max_digits=30) # general purpose field, not user-visible. Used for reporting report_comments = models.TextField(default="") @property def line_cost(self): """ Return the total cost of this OrderItem """ return self.qty * self.unit_cost @classmethod def add_to_order(cls, order, *args, **kwargs): """ A suggested convenience function for subclasses. NOTE: This does not add anything to the cart. 
That is left up to the subclasses to implement for themselves """ # this is a validation step to verify that the currency of the item we # are adding is the same as the currency of the order we are adding it # to currency = kwargs.get('currency', 'usd') if order.currency != currency and order.orderitem_set.exists(): raise InvalidCartItem(_("Trying to add a different currency into the cart")) @transaction.atomic def purchase_item(self): """ This is basically a wrapper around purchased_callback that handles modifying the OrderItem itself """ self.purchased_callback() self.status = 'purchased' self.fulfilled_time = datetime.now(pytz.utc) self.save() def start_purchase(self): """ Start the purchase process. This will set the order item status to "paying", at which point it should no longer be modified. """ self.status = 'paying' self.save() def purchased_callback(self): """ This is called on each inventory item in the shopping cart when the purchase goes through. """ raise NotImplementedError def generate_receipt_instructions(self): """ This is called on each item in a purchased order to generate receipt instructions. This should return a list of `ReceiptInstruction`s in HTML string Default implementation is to return an empty set """ return self.pk_with_subclass, set([]) @property def pk_with_subclass(self): """ Returns a named tuple that annotates the pk of this instance with its class, to fully represent a pk of a subclass (inclusive) of OrderItem """ return OrderItemSubclassPK(type(self), self.pk) @property def is_discounted(self): """ Returns True if the item a discount coupon has been applied to the OrderItem and False otherwise. Earlier, the OrderItems were stored with an empty list_price if a discount had not been applied. Now we consider the item to be non discounted if list_price is None or list_price == unit_cost. In these lines, an item is discounted if it's non-None and list_price and unit_cost mismatch. This should work with both new and old records. """ return self.list_price and self.list_price != self.unit_cost def get_list_price(self): """ Returns the unit_cost if no discount has been applied, or the list_price if it is defined. """ return self.list_price if self.list_price else self.unit_cost @property def single_item_receipt_template(self): """ The template that should be used when there's only one item in the order """ return 'shoppingcart/receipt.html' @property def single_item_receipt_context(self): """ Extra variables needed to render the template specified in `single_item_receipt_template` """ return {} def additional_instruction_text(self, **kwargs): # pylint: disable=unused-argument """ Individual instructions for this order item. Currently, only used for emails. """ return '' @property def pdf_receipt_display_name(self): """ How to display this item on a PDF printed receipt file. This can be overridden by the subclasses of OrderItem """ course_key = getattr(self, 'course_id', None) if course_key: course = get_course_by_id(course_key, depth=0) return course.display_name else: raise Exception( "Not Implemented. OrderItems that are not Course specific should have" " a overridden pdf_receipt_display_name property" ) def analytics_data(self): """Simple function used to construct analytics data for the OrderItem. The default implementation returns defaults for most attributes. When no name or category is specified by the implementation, the string 'N/A' is placed for the name and category. This should be handled appropriately by all implementations. 
Returns A dictionary containing analytics data for this OrderItem. """ return { 'id': self.id, 'sku': type(self).__name__, 'name': 'N/A', 'price': str(self.unit_cost), 'quantity': self.qty, 'category': 'N/A', } def retire(self): """ Called by the `retire` method defined in the `Order` class. Retires an order item if its (and its order's) status was erroneously not updated to "purchased" after the order was processed. """ self.status = ORDER_STATUS_MAP[self.status] self.save() class Invoice(TimeStampedModel): """ This table capture all the information needed to support "invoicing" which is when a user wants to purchase Registration Codes, but will not do so via a Credit Card transaction. """ class Meta(object): app_label = "shoppingcart" company_name = models.CharField(max_length=255, db_index=True) company_contact_name = models.CharField(max_length=255) company_contact_email = models.CharField(max_length=255) recipient_name = models.CharField(max_length=255) recipient_email = models.CharField(max_length=255) address_line_1 = models.CharField(max_length=255) address_line_2 = models.CharField(max_length=255, null=True, blank=True) address_line_3 = models.CharField(max_length=255, null=True, blank=True) city = models.CharField(max_length=255, null=True) state = models.CharField(max_length=255, null=True) zip = models.CharField(max_length=15, null=True) country = models.CharField(max_length=64, null=True) # This field has been deprecated. # The total amount can now be calculated as the sum # of each invoice item associated with the invoice. # For backwards compatibility, this field is maintained # and written to during invoice creation. total_amount = models.FloatField() # This field has been deprecated in order to support # invoices for items that are not course-related. # Although this field is still maintained for backwards # compatibility, you should use CourseRegistrationCodeInvoiceItem # to look up the course ID for purchased redeem codes. course_id = CourseKeyField(max_length=255, db_index=True) internal_reference = models.CharField( max_length=255, null=True, blank=True, help_text=ugettext_lazy("Internal reference code for this invoice.") ) customer_reference_number = models.CharField( max_length=63, null=True, blank=True, help_text=ugettext_lazy("Customer's reference code for this invoice.") ) is_valid = models.BooleanField(default=True) @classmethod def get_invoice_total_amount_for_course(cls, course_key): """ returns the invoice total amount generated by course. """ result = cls.objects.filter(course_id=course_key, is_valid=True).aggregate(total=Sum('total_amount')) total = result.get('total', 0) return total if total else 0 def generate_pdf_invoice(self, course, course_price, quantity, sale_price): """ Generates the pdf invoice for the given course and returns the pdf_buffer. """ discount_per_item = float(course_price) - sale_price / quantity list_price = course_price - discount_per_item items_data = [{ 'item_description': course.display_name, 'quantity': quantity, 'list_price': list_price, 'discount': discount_per_item, 'item_total': quantity * list_price }] pdf_buffer = BytesIO() PDFInvoice( items_data=items_data, item_id=str(self.id), date=datetime.now(pytz.utc), is_invoice=True, total_cost=float(self.total_amount), payment_received=0, balance=float(self.total_amount) ).generate_pdf(pdf_buffer) return pdf_buffer def snapshot(self): """Create a snapshot of the invoice. 
A snapshot is a JSON-serializable representation of the invoice's state, including its line items and associated transactions (payments/refunds). This is useful for saving the history of changes to the invoice. Returns: dict """ return { 'internal_reference': self.internal_reference, 'customer_reference': self.customer_reference_number, 'is_valid': self.is_valid, 'contact_info': { 'company_name': self.company_name, 'company_contact_name': self.company_contact_name, 'company_contact_email': self.company_contact_email, 'recipient_name': self.recipient_name, 'recipient_email': self.recipient_email, 'address_line_1': self.address_line_1, 'address_line_2': self.address_line_2, 'address_line_3': self.address_line_3, 'city': self.city, 'state': self.state, 'zip': self.zip, 'country': self.country, }, 'items': [ item.snapshot() for item in InvoiceItem.objects.filter(invoice=self).select_subclasses() ], 'transactions': [ trans.snapshot() for trans in InvoiceTransaction.objects.filter(invoice=self) ], } def __unicode__(self): label = ( unicode(self.internal_reference) if self.internal_reference else u"No label" ) created = ( self.created.strftime("%Y-%m-%d") if self.created else u"No date" ) return u"{label} ({date_created})".format( label=label, date_created=created ) INVOICE_TRANSACTION_STATUSES = ( # A payment/refund is in process, but money has not yet been transferred ('started', 'started'), # A payment/refund has completed successfully # This should be set ONLY once money has been successfully exchanged. ('completed', 'completed'), # A payment/refund was promised, but was cancelled before # money had been transferred. An example would be # cancelling a refund check before the recipient has # a chance to deposit it. ('cancelled', 'cancelled') ) class InvoiceTransaction(TimeStampedModel): """Record payment and refund information for invoices. There are two expected use cases: 1) We send an invoice to someone, and they send us a check. We then manually create an invoice transaction to represent the payment. 2) We send an invoice to someone, and they pay us. Later, we need to issue a refund for the payment. We manually create a transaction with a negative amount to represent the refund. """ class Meta(object): app_label = "shoppingcart" invoice = models.ForeignKey(Invoice) amount = models.DecimalField( default=0.0, decimal_places=2, max_digits=30, help_text=ugettext_lazy( "The amount of the transaction. Use positive amounts for payments" " and negative amounts for refunds." ) ) currency = models.CharField( default="usd", max_length=8, help_text=ugettext_lazy("Lower-case ISO currency codes") ) comments = models.TextField( null=True, blank=True, help_text=ugettext_lazy("Optional: provide additional information for this transaction") ) status = models.CharField( max_length=32, default='started', choices=INVOICE_TRANSACTION_STATUSES, help_text=ugettext_lazy( "The status of the payment or refund. " "'started' means that payment is expected, but money has not yet been transferred. " "'completed' means that the payment or refund was received. " "'cancelled' means that payment or refund was expected, but was cancelled before money was transferred. 
" ) ) created_by = models.ForeignKey(User) last_modified_by = models.ForeignKey(User, related_name='last_modified_by_user') @classmethod def get_invoice_transaction(cls, invoice_id): """ if found Returns the Invoice Transaction object for the given invoice_id else returns None """ try: return cls.objects.get(Q(invoice_id=invoice_id), Q(status='completed') | Q(status='refunded')) except InvoiceTransaction.DoesNotExist: return None @classmethod def get_total_amount_of_paid_course_invoices(cls, course_key): """ returns the total amount of the paid invoices. """ result = cls.objects.filter(amount__gt=0, invoice__course_id=course_key, status='completed').aggregate( total=Sum( 'amount', output_field=models.DecimalField(decimal_places=2, max_digits=30) ) ) total = result.get('total', 0) return total if total else 0 def snapshot(self): """Create a snapshot of the invoice transaction. The returned dictionary is JSON-serializable. Returns: dict """ return { 'amount': unicode(self.amount), 'currency': self.currency, 'comments': self.comments, 'status': self.status, 'created_by': self.created_by.username, 'last_modified_by': self.last_modified_by.username } class InvoiceItem(TimeStampedModel): """ This is the basic interface for invoice items. Each invoice item represents a "line" in the invoice. For example, in an invoice for course registration codes, there might be an invoice item representing 10 registration codes for the DemoX course. """ class Meta(object): app_label = "shoppingcart" objects = InheritanceManager() invoice = models.ForeignKey(Invoice, db_index=True) qty = models.IntegerField( default=1, help_text=ugettext_lazy("The number of items sold.") ) unit_price = models.DecimalField( default=0.0, decimal_places=2, max_digits=30, help_text=ugettext_lazy("The price per item sold, including discounts.") ) currency = models.CharField( default="usd", max_length=8, help_text=ugettext_lazy("Lower-case ISO currency codes") ) def snapshot(self): """Create a snapshot of the invoice item. The returned dictionary is JSON-serializable. Returns: dict """ return { 'qty': self.qty, 'unit_price': unicode(self.unit_price), 'currency': self.currency } class CourseRegistrationCodeInvoiceItem(InvoiceItem): """ This is an invoice item that represents a payment for a course registration. """ class Meta(object): app_label = "shoppingcart" course_id = CourseKeyField(max_length=128, db_index=True) def snapshot(self): """Create a snapshot of the invoice item. This is the same as a snapshot for other invoice items, with the addition of a `course_id` field. Returns: dict """ snapshot = super(CourseRegistrationCodeInvoiceItem, self).snapshot() snapshot['course_id'] = unicode(self.course_id) return snapshot class InvoiceHistory(models.Model): """History of changes to invoices. This table stores snapshots of invoice state, including the associated line items and transactions (payments/refunds). Entries in the table are created, but never deleted or modified. We use Django signals to save history entries on change events. These signals are fired within a database transaction, so the history record is created only if the invoice change is successfully persisted. """ timestamp = models.DateTimeField(auto_now_add=True, db_index=True) invoice = models.ForeignKey(Invoice) # JSON-serialized representation of the current state # of the invoice, including its line items and # transactions (payments/refunds). 
snapshot = models.TextField(blank=True) @classmethod def save_invoice_snapshot(cls, invoice): """Save a snapshot of the invoice's current state. Arguments: invoice (Invoice): The invoice to save. """ cls.objects.create( invoice=invoice, snapshot=json.dumps(invoice.snapshot()) ) @staticmethod def snapshot_receiver(sender, instance, **kwargs): # pylint: disable=unused-argument """Signal receiver that saves a snapshot of an invoice. Arguments: sender: Not used, but required by Django signals. instance (Invoice, InvoiceItem, or InvoiceTransaction) """ if isinstance(instance, Invoice): InvoiceHistory.save_invoice_snapshot(instance) elif hasattr(instance, 'invoice'): InvoiceHistory.save_invoice_snapshot(instance.invoice) class Meta(object): get_latest_by = "timestamp" app_label = "shoppingcart" # Hook up Django signals to record changes in the history table. # We record any change to an invoice, invoice item, or transaction. # We also record any deletion of a transaction, since users can delete # transactions via Django admin. # Note that we need to include *each* InvoiceItem subclass # here, since Django signals do not fire automatically for subclasses # of the "sender" class. post_save.connect(InvoiceHistory.snapshot_receiver, sender=Invoice) post_save.connect(InvoiceHistory.snapshot_receiver, sender=InvoiceItem) post_save.connect(InvoiceHistory.snapshot_receiver, sender=CourseRegistrationCodeInvoiceItem) post_save.connect(InvoiceHistory.snapshot_receiver, sender=InvoiceTransaction) post_delete.connect(InvoiceHistory.snapshot_receiver, sender=InvoiceTransaction) class CourseRegistrationCode(models.Model): """ This table contains registration codes With registration code, a user can register for a course for free """ class Meta(object): app_label = "shoppingcart" code = models.CharField(max_length=32, db_index=True, unique=True) course_id = CourseKeyField(max_length=255, db_index=True) created_by = models.ForeignKey(User, related_name='created_by_user') created_at = models.DateTimeField(auto_now_add=True) order = models.ForeignKey(Order, db_index=True, null=True, related_name="purchase_order") mode_slug = models.CharField(max_length=100, null=True) is_valid = models.BooleanField(default=True) # For backwards compatibility, we maintain the FK to "invoice" # In the future, we will remove this in favor of the FK # to "invoice_item" (which can be used to look up the invoice). invoice = models.ForeignKey(Invoice, null=True) invoice_item = models.ForeignKey(CourseRegistrationCodeInvoiceItem, null=True) @classmethod def order_generated_registration_codes(cls, course_id): """ Returns the registration codes that were generated via bulk purchase scenario. """ return cls.objects.filter(order__isnull=False, course_id=course_id) @classmethod def invoice_generated_registration_codes(cls, course_id): """ Returns the registration codes that were generated via invoice. 
""" return cls.objects.filter(invoice__isnull=False, course_id=course_id) class RegistrationCodeRedemption(models.Model): """ This model contains the registration-code redemption info """ class Meta(object): app_label = "shoppingcart" order = models.ForeignKey(Order, db_index=True, null=True) registration_code = models.ForeignKey(CourseRegistrationCode, db_index=True) redeemed_by = models.ForeignKey(User, db_index=True) redeemed_at = models.DateTimeField(auto_now_add=True, null=True) course_enrollment = models.ForeignKey(CourseEnrollment, null=True) @classmethod def registration_code_used_for_enrollment(cls, course_enrollment): """ Returns RegistrationCodeRedemption object if registration code has been used during the course enrollment else Returns None. """ # theoretically there could be more than one (e.g. someone self-unenrolls # then re-enrolls with a different regcode) reg_codes = cls.objects.filter(course_enrollment=course_enrollment).order_by('-redeemed_at') if reg_codes: # return the first one. In all normal use cases of registration codes # the user will only have one return reg_codes[0] return None @classmethod def is_registration_code_redeemed(cls, course_reg_code): """ Checks the existence of the registration code in the RegistrationCodeRedemption """ return cls.objects.filter(registration_code__code=course_reg_code).exists() @classmethod def get_registration_code_redemption(cls, code, course_id): """ Returns the registration code redemption object if found else returns None. """ try: code_redemption = cls.objects.get(registration_code__code=code, registration_code__course_id=course_id) except cls.DoesNotExist: code_redemption = None return code_redemption @classmethod def create_invoice_generated_registration_redemption(cls, course_reg_code, user): # pylint: disable=invalid-name """ This function creates a RegistrationCodeRedemption entry in case the registration codes were invoice generated and thus the order_id is missing. 
""" code_redemption = RegistrationCodeRedemption(registration_code=course_reg_code, redeemed_by=user) code_redemption.save() return code_redemption class SoftDeleteCouponManager(models.Manager): """ Use this manager to get objects that have a is_active=True """ def get_active_coupons_queryset(self): """ filter the is_active = True Coupons only """ return super(SoftDeleteCouponManager, self).get_queryset().filter(is_active=True) def get_queryset(self): """ get all the coupon objects """ return super(SoftDeleteCouponManager, self).get_queryset() class Coupon(models.Model): """ This table contains coupon codes A user can get a discount offer on course if provide coupon code """ class Meta(object): app_label = "shoppingcart" code = models.CharField(max_length=32, db_index=True) description = models.CharField(max_length=255, null=True, blank=True) course_id = CourseKeyField(max_length=255) percentage_discount = models.IntegerField(default=0) created_by = models.ForeignKey(User) created_at = models.DateTimeField(auto_now_add=True) is_active = models.BooleanField(default=True) expiration_date = models.DateTimeField(null=True, blank=True) def __unicode__(self): return "[Coupon] code: {} course: {}".format(self.code, self.course_id) objects = SoftDeleteCouponManager() @property def display_expiry_date(self): """ return the coupon expiration date in the readable format """ return (self.expiration_date - timedelta(days=1)).strftime("%B %d, %Y") if self.expiration_date else None class CouponRedemption(models.Model): """ This table contain coupon redemption info """ class Meta(object): app_label = "shoppingcart" order = models.ForeignKey(Order, db_index=True) user = models.ForeignKey(User, db_index=True) coupon = models.ForeignKey(Coupon, db_index=True) @classmethod def remove_code_redemption_from_item(cls, item, user): """ If an item removed from shopping cart then we will remove the corresponding redemption info of coupon code """ order_item_course_id = item.course_id try: # Try to remove redemption information of coupon code, If exist. 
coupon_redemption = cls.objects.get( user=user, coupon__course_id=order_item_course_id if order_item_course_id else CourseKeyField.Empty, order=item.order_id ) coupon_redemption.delete() log.info( u'Coupon "%s" redemption entry removed for user "%s" for order item "%s"', coupon_redemption.coupon.code, user, str(item.id), ) except CouponRedemption.DoesNotExist: log.debug(u'Code redemption does not exist for order item id=%s.', str(item.id)) @classmethod def remove_coupon_redemption_from_cart(cls, user, cart): """ This method delete coupon redemption """ coupon_redemption = cls.objects.filter(user=user, order=cart) if coupon_redemption: coupon_redemption.delete() log.info(u'Coupon redemption entry removed for user %s for order %s', user, cart.id) @classmethod def get_discount_price(cls, percentage_discount, value): """ return discounted price against coupon """ discount = Decimal("{0:.2f}".format(Decimal(percentage_discount / 100.00) * value)) return value - discount @classmethod def add_coupon_redemption(cls, coupon, order, cart_items): """ add coupon info into coupon_redemption model """ is_redemption_applied = False coupon_redemptions = cls.objects.filter(order=order, user=order.user) for coupon_redemption in coupon_redemptions: if coupon_redemption.coupon.code != coupon.code or coupon_redemption.coupon.id == coupon.id: log.exception( u"Coupon redemption already exist for user '%s' against order id '%s'", order.user.username, order.id, ) raise MultipleCouponsNotAllowedException for item in cart_items: if item.course_id: if item.course_id == coupon.course_id: coupon_redemption = cls(order=order, user=order.user, coupon=coupon) coupon_redemption.save() discount_price = cls.get_discount_price(coupon.percentage_discount, item.unit_cost) item.list_price = item.unit_cost item.unit_cost = discount_price item.save() log.info( u"Discount generated for user %s against order id '%s'", order.user.username, order.id, ) is_redemption_applied = True return is_redemption_applied return is_redemption_applied @classmethod def get_top_discount_codes_used(cls, course_id): """ Returns the top discount codes used. QuerySet = [ { 'coupon__percentage_discount': 22, 'coupon__code': '12', 'coupon__used_count': '2', }, { ... } ] """ return cls.objects.filter(order__status='purchased', coupon__course_id=course_id).values( 'coupon__code', 'coupon__percentage_discount' ).annotate(coupon__used_count=Count('coupon__code')).order_by('-coupon__used_count') @classmethod def get_total_coupon_code_purchases(cls, course_id): """ returns total seats purchases using coupon codes """ return cls.objects.filter(order__status='purchased', coupon__course_id=course_id).aggregate(Count('coupon')) class PaidCourseRegistration(OrderItem): """ This is an inventory item for paying for a course registration """ class Meta(object): app_label = "shoppingcart" course_id = CourseKeyField(max_length=128, db_index=True) mode = models.SlugField(default=CourseMode.DEFAULT_SHOPPINGCART_MODE_SLUG) course_enrollment = models.ForeignKey(CourseEnrollment, null=True) @classmethod def get_self_purchased_seat_count(cls, course_key, status='purchased'): """ returns the count of paid_course items filter by course_id and status. 
""" return cls.objects.filter(course_id=course_key, status=status).count() @classmethod def get_course_item_for_user_enrollment(cls, user, course_id, course_enrollment): """ Returns PaidCourseRegistration object if user has payed for the course enrollment else Returns None """ try: return cls.objects.filter(course_id=course_id, user=user, course_enrollment=course_enrollment, status='purchased').latest('id') except PaidCourseRegistration.DoesNotExist: return None @classmethod def contained_in_order(cls, order, course_id): """ Is the course defined by course_id contained in the order? """ return course_id in [ item.course_id for item in order.orderitem_set.all().select_subclasses("paidcourseregistration") if isinstance(item, cls) ] @classmethod def get_total_amount_of_purchased_item(cls, course_key, status='purchased'): """ This will return the total amount of money that a purchased course generated """ total_cost = 0 result = cls.objects.filter(course_id=course_key, status=status).aggregate( total=Sum( F('qty') * F('unit_cost'), output_field=models.DecimalField(decimal_places=2, max_digits=30) ) ) if result['total'] is not None: total_cost = result['total'] return total_cost @classmethod @transaction.atomic def add_to_order(cls, order, course_id, mode_slug=CourseMode.DEFAULT_SHOPPINGCART_MODE_SLUG, cost=None, currency=None): # pylint: disable=arguments-differ """ A standardized way to create these objects, with sensible defaults filled in. Will update the cost if called on an order that already carries the course. Returns the order item """ # First a bunch of sanity checks: # actually fetch the course to make sure it exists, use this to # throw errors if it doesn't. course = modulestore().get_course(course_id) if not course: log.error("User {} tried to add non-existent course {} to cart id {}" .format(order.user.email, course_id, order.id)) raise CourseDoesNotExistException if cls.contained_in_order(order, course_id): log.warning( u"User %s tried to add PaidCourseRegistration for course %s, already in cart id %s", order.user.email, course_id, order.id, ) raise ItemAlreadyInCartException if CourseEnrollment.is_enrolled(user=order.user, course_key=course_id): log.warning("User {} trying to add course {} to cart id {}, already registered" .format(order.user.email, course_id, order.id)) raise AlreadyEnrolledInCourseException ### Validations done, now proceed ### handle default arguments for mode_slug, cost, currency course_mode = CourseMode.mode_for_course(course_id, mode_slug) if not course_mode: # user could have specified a mode that's not set, in that case return the DEFAULT_MODE course_mode = CourseMode.DEFAULT_SHOPPINGCART_MODE if not cost: cost = course_mode.min_price if not currency: currency = course_mode.currency super(PaidCourseRegistration, cls).add_to_order(order, course_id, cost, currency=currency) item, created = cls.objects.get_or_create(order=order, user=order.user, course_id=course_id) item.status = order.status item.mode = course_mode.slug item.qty = 1 item.unit_cost = cost item.list_price = cost item.line_desc = _(u'Registration for Course: {course_name}').format( course_name=course.display_name_with_default_escaped) item.currency = currency order.currency = currency item.report_comments = item.csv_report_comments order.save() item.save() log.info("User {} added course registration {} to cart: order {}" .format(order.user.email, course_id, order.id)) return item def purchased_callback(self): """ When purchased, this should enroll the user in the course. 
We are assuming that course settings for enrollment date are configured such that only if the (user.email, course_id) pair is found in CourseEnrollmentAllowed will the user be allowed to enroll. Otherwise requiring payment would in fact be quite silly since there's a clear back door. """ if not modulestore().has_course(self.course_id): msg = u"The customer purchased Course {0}, but that course doesn't exist!".format(self.course_id) log.error(msg) raise PurchasedCallbackException(msg) # enroll in course and link to the enrollment_id self.course_enrollment = CourseEnrollment.enroll(user=self.user, course_key=self.course_id, mode=self.mode) self.save() log.info("Enrolled {0} in paid course {1}, paid ${2}" .format(self.user.email, self.course_id, self.line_cost)) def generate_receipt_instructions(self): """ Generates instructions when the user has purchased a PaidCourseRegistration. Basically tells the user to visit the dashboard to see their new classes """ notification = _( u"Please visit your {link_start}dashboard{link_end} " u"to see your new course." ).format( link_start=u'<a href="{url}">'.format(url=reverse('dashboard')), link_end=u'</a>', ) return self.pk_with_subclass, set([notification]) @property def csv_report_comments(self): """ Tries to fetch an annotation associated with the course_id from the database. If not found, returns u"". Otherwise returns the annotation """ try: return PaidCourseRegistrationAnnotation.objects.get(course_id=self.course_id).annotation except PaidCourseRegistrationAnnotation.DoesNotExist: return u"" def analytics_data(self): """Simple function used to construct analytics data for the OrderItem. If the Order Item is associated with a course, additional fields will be populated with course information. If there is a mode associated, the mode data is included in the SKU. Returns A dictionary containing analytics data for this OrderItem. """ data = super(PaidCourseRegistration, self).analytics_data() sku = data['sku'] if self.course_id != CourseKeyField.Empty: data['name'] = unicode(self.course_id) data['category'] = unicode(self.course_id.org) if self.mode: data['sku'] = sku + u'.' + unicode(self.mode) return data class CourseRegCodeItem(OrderItem): """ This is an inventory item for paying for generating course registration codes """ class Meta(object): app_label = "shoppingcart" course_id = CourseKeyField(max_length=128, db_index=True) mode = models.SlugField(default=CourseMode.DEFAULT_SHOPPINGCART_MODE_SLUG) @classmethod def get_bulk_purchased_seat_count(cls, course_key, status='purchased'): """ returns the sum of bulk purchases seats. """ total = 0 result = cls.objects.filter(course_id=course_key, status=status).aggregate(total=Sum('qty')) if result['total'] is not None: total = result['total'] return total @classmethod def contained_in_order(cls, order, course_id): """ Is the course defined by course_id contained in the order? 
""" return course_id in [ item.course_id for item in order.orderitem_set.all().select_subclasses("courseregcodeitem") if isinstance(item, cls) ] @classmethod def get_total_amount_of_purchased_item(cls, course_key, status='purchased'): """ This will return the total amount of money that a purchased course generated """ total_cost = 0 result = cls.objects.filter(course_id=course_key, status=status).aggregate( total=Sum( F('qty') * F('unit_cost'), output_field=models.DecimalField(decimal_places=2, max_digits=30) ) ) if result['total'] is not None: total_cost = result['total'] return total_cost @classmethod @transaction.atomic def add_to_order(cls, order, course_id, qty, mode_slug=CourseMode.DEFAULT_SHOPPINGCART_MODE_SLUG, cost=None, currency=None): # pylint: disable=arguments-differ """ A standardized way to create these objects, with sensible defaults filled in. Will update the cost if called on an order that already carries the course. Returns the order item """ # First a bunch of sanity checks: # actually fetch the course to make sure it exists, use this to # throw errors if it doesn't. course = modulestore().get_course(course_id) if not course: log.error("User {} tried to add non-existent course {} to cart id {}" .format(order.user.email, course_id, order.id)) raise CourseDoesNotExistException if cls.contained_in_order(order, course_id): log.warning("User {} tried to add PaidCourseRegistration for course {}, already in cart id {}" .format(order.user.email, course_id, order.id)) raise ItemAlreadyInCartException if CourseEnrollment.is_enrolled(user=order.user, course_key=course_id): log.warning("User {} trying to add course {} to cart id {}, already registered" .format(order.user.email, course_id, order.id)) raise AlreadyEnrolledInCourseException ### Validations done, now proceed ### handle default arguments for mode_slug, cost, currency course_mode = CourseMode.mode_for_course(course_id, mode_slug) if not course_mode: # user could have specified a mode that's not set, in that case return the DEFAULT_SHOPPINGCART_MODE course_mode = CourseMode.DEFAULT_SHOPPINGCART_MODE if not cost: cost = course_mode.min_price if not currency: currency = course_mode.currency super(CourseRegCodeItem, cls).add_to_order(order, course_id, cost, currency=currency) item, created = cls.objects.get_or_create(order=order, user=order.user, course_id=course_id) # pylint: disable=unused-variable item.status = order.status item.mode = course_mode.slug item.unit_cost = cost item.list_price = cost item.qty = qty item.line_desc = _(u'Enrollment codes for Course: {course_name}').format( course_name=course.display_name_with_default_escaped) item.currency = currency order.currency = currency item.report_comments = item.csv_report_comments order.save() item.save() log.info("User {} added course registration {} to cart: order {}" .format(order.user.email, course_id, order.id)) return item def purchased_callback(self): """ The purchase is completed, this OrderItem type will generate Registration Codes that will be redeemed by users """ if not modulestore().has_course(self.course_id): msg = u"The customer purchased Course {0}, but that course doesn't exist!".format(self.course_id) log.error(msg) raise PurchasedCallbackException(msg) total_registration_codes = int(self.qty) # we need to import here because of a circular dependency # we should ultimately refactor code to have save_registration_code in this models.py # file, but there's also a shared dependency on a random string generator which # is in another PR (for another 
feature) from instructor.views.api import save_registration_code for i in range(total_registration_codes): # pylint: disable=unused-variable save_registration_code(self.user, self.course_id, self.mode, order=self.order) log.info("Enrolled {0} in paid course {1}, paid ${2}" .format(self.user.email, self.course_id, self.line_cost)) @property def csv_report_comments(self): """ Tries to fetch an annotation associated with the course_id from the database. If not found, returns u"". Otherwise returns the annotation """ try: return CourseRegCodeItemAnnotation.objects.get(course_id=self.course_id).annotation except CourseRegCodeItemAnnotation.DoesNotExist: return u"" def analytics_data(self): """Simple function used to construct analytics data for the OrderItem. If the OrderItem is associated with a course, additional fields will be populated with course information. If a mode is available, it will be included in the SKU. Returns A dictionary containing analytics data for this OrderItem. """ data = super(CourseRegCodeItem, self).analytics_data() sku = data['sku'] if self.course_id != CourseKeyField.Empty: data['name'] = unicode(self.course_id) data['category'] = unicode(self.course_id.org) if self.mode: data['sku'] = sku + u'.' + unicode(self.mode) return data class CourseRegCodeItemAnnotation(models.Model): """ A model that maps course_id to an additional annotation. This is specifically needed because when Stanford generates report for the paid courses, each report item must contain the payment account associated with a course. And unfortunately we didn't have the concept of a "SKU" or stock item where we could keep this association, so this is to retrofit it. """ class Meta(object): app_label = "shoppingcart" course_id = CourseKeyField(unique=True, max_length=128, db_index=True) annotation = models.TextField(null=True) def __unicode__(self): # pylint: disable=no-member return u"{} : {}".format(self.course_id.to_deprecated_string(), self.annotation) class PaidCourseRegistrationAnnotation(models.Model): """ A model that maps course_id to an additional annotation. This is specifically needed because when Stanford generates report for the paid courses, each report item must contain the payment account associated with a course. And unfortunately we didn't have the concept of a "SKU" or stock item where we could keep this association, so this is to retrofit it. """ class Meta(object): app_label = "shoppingcart" course_id = CourseKeyField(unique=True, max_length=128, db_index=True) annotation = models.TextField(null=True) def __unicode__(self): # pylint: disable=no-member return u"{} : {}".format(self.course_id.to_deprecated_string(), self.annotation) class CertificateItem(OrderItem): """ This is an inventory item for purchasing certificates """ class Meta(object): app_label = "shoppingcart" course_id = CourseKeyField(max_length=128, db_index=True) course_enrollment = models.ForeignKey(CourseEnrollment) mode = models.SlugField() @receiver(UNENROLL_DONE) def refund_cert_callback(sender, course_enrollment=None, skip_refund=False, **kwargs): # pylint: disable=no-self-argument,unused-argument """ When a CourseEnrollment object calls its unenroll method, this function checks to see if that unenrollment occurred in a verified certificate that was within the refund deadline. If so, it actually performs the refund. Returns the refunded certificate on a successful refund; else, it returns nothing. 
""" # Only refund verified cert unenrollments that are within bounds of the expiration date if (not course_enrollment.refundable()) or skip_refund: return target_certs = CertificateItem.objects.filter(course_id=course_enrollment.course_id, user_id=course_enrollment.user, status='purchased', mode='verified') try: target_cert = target_certs[0] except IndexError: log.warning( u"Matching CertificateItem not found while trying to refund. User %s, Course %s", course_enrollment.user, course_enrollment.course_id, ) return target_cert.status = 'refunded' target_cert.refund_requested_time = datetime.now(pytz.utc) target_cert.save() target_cert.order.refund() order_number = target_cert.order_id # send billing an email so they can handle refunding subject = _("[Refund] User-Requested Refund") message = "User {user} ({user_email}) has requested a refund on Order #{order_number}.".format(user=course_enrollment.user, user_email=course_enrollment.user.email, order_number=order_number) to_email = [settings.PAYMENT_SUPPORT_EMAIL] from_email = microsite.get_value('payment_support_email', settings.PAYMENT_SUPPORT_EMAIL) try: send_mail(subject, message, from_email, to_email, fail_silently=False) except Exception as exception: # pylint: disable=broad-except err_str = ('Failed sending email to billing to request a refund for verified certificate' ' (User {user}, Course {course}, CourseEnrollmentID {ce_id}, Order #{order})\n{exception}') log.error(err_str.format( user=course_enrollment.user, course=course_enrollment.course_id, ce_id=course_enrollment.id, order=order_number, exception=exception, )) return target_cert @classmethod @transaction.atomic def add_to_order(cls, order, course_id, cost, mode, currency='usd'): """ Add a CertificateItem to an order Returns the CertificateItem object after saving `order` - an order that this item should be added to, generally the cart order `course_id` - the course that we would like to purchase as a CertificateItem `cost` - the amount the user will be paying for this CertificateItem `mode` - the course mode that this certificate is going to be issued for This item also creates a new enrollment if none exists for this user and this course. Example Usage: cart = Order.get_cart_for_user(user) CertificateItem.add_to_order(cart, 'edX/Test101/2013_Fall', 30, 'verified') """ super(CertificateItem, cls).add_to_order(order, course_id, cost, currency=currency) course_enrollment = CourseEnrollment.get_or_create_enrollment(order.user, course_id) # do some validation on the enrollment mode valid_modes = CourseMode.modes_for_course_dict(course_id) if mode in valid_modes: mode_info = valid_modes[mode] else: msg = u"Mode {mode} does not exist for {course_id}".format(mode=mode, course_id=course_id) log.error(msg) raise InvalidCartItem( _(u"Mode {mode} does not exist for {course_id}").format(mode=mode, course_id=course_id) ) item, _created = cls.objects.get_or_create( order=order, user=order.user, course_id=course_id, course_enrollment=course_enrollment, mode=mode, ) item.status = order.status item.qty = 1 item.unit_cost = cost item.list_price = cost course_name = modulestore().get_course(course_id).display_name # Translators: In this particular case, mode_name refers to a # particular mode (i.e. Honor Code Certificate, Verified Certificate, etc) # by which a user could enroll in the given course. 
item.line_desc = _("{mode_name} for course {course}").format( mode_name=mode_info.name, course=course_name ) item.currency = currency order.currency = currency order.save() item.save() return item def purchased_callback(self): """ When purchase goes through, activate and update the course enrollment for the correct mode """ self.course_enrollment.change_mode(self.mode) self.course_enrollment.activate() def additional_instruction_text(self): verification_reminder = "" refund_reminder_msg = _("You can unenroll in the course and receive a full refund for 14 days after the course " "start date. ") is_enrollment_mode_verified = self.course_enrollment.is_verified_enrollment() is_professional_mode_verified = self.course_enrollment.is_professional_enrollment() if is_enrollment_mode_verified: domain = microsite.get_value('SITE_NAME', settings.SITE_NAME) path = reverse('verify_student_verify_now', kwargs={'course_id': unicode(self.course_id)}) verification_url = "http://{domain}{path}".format(domain=domain, path=path) verification_reminder = _( "If you haven't verified your identity yet, please start the verification process ({verification_url})." ).format(verification_url=verification_url) if is_professional_mode_verified: refund_reminder_msg = _("You can unenroll in the course and receive a full refund for 2 days after the " "course start date. ") refund_reminder = _( "{refund_reminder_msg}" "To receive your refund, contact {billing_email}. " "Please include your order number in your email. " "Please do NOT include your credit card information." ).format( refund_reminder_msg=refund_reminder_msg, billing_email=settings.PAYMENT_SUPPORT_EMAIL ) # Need this to be unicode in case the reminder strings # have been translated and contain non-ASCII unicode return u"{verification_reminder} {refund_reminder}".format( verification_reminder=verification_reminder, refund_reminder=refund_reminder ) @classmethod def verified_certificates_count(cls, course_id, status): """Return a queryset of CertificateItem for every verified enrollment in course_id with the given status.""" return use_read_replica_if_available( CertificateItem.objects.filter(course_id=course_id, mode='verified', status=status).count()) # TODO combine these three methods into one @classmethod def verified_certificates_monetary_field_sum(cls, course_id, status, field_to_aggregate): """ Returns a Decimal indicating the total sum of field_to_aggregate for all verified certificates with a particular status. Sample usages: - status 'refunded' and field_to_aggregate 'unit_cost' will give the total amount of money refunded for course_id - status 'purchased' and field_to_aggregate 'service_fees' gives the sum of all service fees for purchased certificates etc """ query = use_read_replica_if_available( CertificateItem.objects.filter(course_id=course_id, mode='verified', status=status)).aggregate(Sum(field_to_aggregate))[field_to_aggregate + '__sum'] if query is None: return Decimal(0.00) else: return query @classmethod def verified_certificates_contributing_more_than_minimum(cls, course_id): return use_read_replica_if_available( CertificateItem.objects.filter( course_id=course_id, mode='verified', status='purchased', unit_cost__gt=(CourseMode.min_course_price_for_verified_for_currency(course_id, 'usd')))).count() def analytics_data(self): """Simple function used to construct analytics data for the OrderItem. If the CertificateItem is associated with a course, additional fields will be populated with course information. 
If there is a mode associated with the certificate, it is included in the SKU. Returns A dictionary containing analytics data for this OrderItem. """ data = super(CertificateItem, self).analytics_data() sku = data['sku'] if self.course_id != CourseKeyField.Empty: data['name'] = unicode(self.course_id) data['category'] = unicode(self.course_id.org) if self.mode: data['sku'] = sku + u'.' + unicode(self.mode) return data class DonationConfiguration(ConfigurationModel): """Configure whether donations are enabled on the site.""" class Meta(ConfigurationModel.Meta): app_label = "shoppingcart" class Donation(OrderItem): """A donation made by a user. Donations can be made for a specific course or to the organization as a whole. Users can choose the donation amount. """ class Meta(object): app_label = "shoppingcart" # Types of donations DONATION_TYPES = ( ("general", "A general donation"), ("course", "A donation to a particular course") ) # The type of donation donation_type = models.CharField(max_length=32, default="general", choices=DONATION_TYPES) # If a donation is made for a specific course, then store the course ID here. # If the donation is made to the organization as a whole, # set this field to CourseKeyField.Empty course_id = CourseKeyField(max_length=255, db_index=True) @classmethod @transaction.atomic def add_to_order(cls, order, donation_amount, course_id=None, currency='usd'): """Add a donation to an order. Args: order (Order): The order to add this donation to. donation_amount (Decimal): The amount the user is donating. Keyword Args: course_id (CourseKey): If provided, associate this donation with a particular course. currency (str): The currency used for the the donation. Raises: InvalidCartItem: The provided course ID is not valid. Returns: Donation """ # This will validate the currency but won't actually add the item to the order. super(Donation, cls).add_to_order(order, currency=currency) # Create a line item description, including the name of the course # if this is a per-course donation. # This will raise an exception if the course can't be found. description = cls._line_item_description(course_id=course_id) params = { "order": order, "user": order.user, "status": order.status, "qty": 1, "unit_cost": donation_amount, "currency": currency, "line_desc": description } if course_id is not None: params["course_id"] = course_id params["donation_type"] = "course" else: params["donation_type"] = "general" return cls.objects.create(**params) def purchased_callback(self): """Donations do not need to be fulfilled, so this method does nothing.""" pass def generate_receipt_instructions(self): """Provide information about tax-deductible donations in the receipt. Returns: tuple of (Donation, unicode) """ return self.pk_with_subclass, set([self._tax_deduction_msg()]) def additional_instruction_text(self, **kwargs): """Provide information about tax-deductible donations in the confirmation email. Returns: unicode """ return self._tax_deduction_msg() def _tax_deduction_msg(self): """Return the translated version of the tax deduction message. Returns: unicode """ return _( u"We greatly appreciate this generous contribution and your support of the {platform_name} mission. " u"This receipt was prepared to support charitable contributions for tax purposes. " u"We confirm that neither goods nor services were provided in exchange for this gift." 
).format(platform_name=get_themed_value('PLATFORM_NAME', settings.PLATFORM_NAME)) @classmethod def _line_item_description(cls, course_id=None): """Create a line-item description for the donation. Includes the course display name if provided. Keyword Arguments: course_id (CourseKey) Raises: CourseDoesNotExistException: The course ID is not valid. Returns: unicode """ # If a course ID is provided, include the display name of the course # in the line item description. if course_id is not None: course = modulestore().get_course(course_id) if course is None: msg = u"Could not find a course with the ID '{course_id}'".format(course_id=course_id) log.error(msg) raise CourseDoesNotExistException( _(u"Could not find a course with the ID '{course_id}'").format(course_id=course_id) ) return _(u"Donation for {course}").format(course=course.display_name) # The donation is for the organization as a whole, not a specific course else: return _(u"Donation for {platform_name}").format(platform_name=get_themed_value('PLATFORM_NAME', settings.PLATFORM_NAME)) @property def single_item_receipt_context(self): return { 'receipt_has_donation_item': True, } def analytics_data(self): """Simple function used to construct analytics data for the OrderItem. If the donation is associated with a course, additional fields will be populated with course information. When no name or category is specified by the implementation, the platform name is used as a default value for required event fields, to declare that the Order is specific to the platform, rather than a specific product name or category. Returns A dictionary containing analytics data for this OrderItem. """ data = super(Donation, self).analytics_data() if self.course_id != CourseKeyField.Empty: data['name'] = unicode(self.course_id) data['category'] = unicode(self.course_id.org) else: data['name'] = get_themed_value('PLATFORM_NAME', settings.PLATFORM_NAME) data['category'] = get_themed_value('PLATFORM_NAME', settings.PLATFORM_NAME) return data @property def pdf_receipt_display_name(self): """ How to display this item on a PDF printed receipt file. """ return self._line_item_description(course_id=self.course_id)
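# ---------------------------------------------------------------------------
# Illustrative sketch only, not part of the upstream models.py above: a minimal
# example of how the cart API defined in this module is typically exercised.
# Order.get_cart_for_user, Order.start_purchase and Order.purchase are assumed
# to exist elsewhere in this module (they are referenced in docstrings above but
# not shown here); `user` is a Django User and `course_key` a CourseKey.
def _example_paid_registration_flow(user, course_key):
    """Hypothetical helper: add a paid registration to the cart and purchase it."""
    cart = Order.get_cart_for_user(user)                      # the user's active 'cart' order
    item = PaidCourseRegistration.add_to_order(cart, course_key)
    cart.start_purchase()                                     # order and items move to 'paying'
    cart.purchase()                                           # on payment success, purchased_callback() enrolls the user
    return item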
zhenzhai/edx-platform
lms/djangoapps/shoppingcart/models.py
Python
agpl-3.0
90694
[ "VisIt" ]
2bac78cbc02a6aee8b585df71e9d590eb934ccb2c320a978299cc65f7ff3a90e
""" Classes that define a component of a scaling model. Each class holds the parameters and relevant data, as a list of arrays, from which to calculate inverse scale factors and derivatives. The components are initialised without any data, which is added by setting the data dict. In order to update the internal data lists in order to calculate the scales and derivatives, the update_reflection_data method should be called, which can optionally be provided with selection arrays to split the data for blockwise/parallel calculations. The scaling algorithm makes use of the components in the following way. First, the data for all 'suitable' reflections are added to the components. Then, at different stages of the algorithm, selection lists are provided to select a subset of this data (e.g. a small subset to prepare the component for minimisation calculation, or a large subset for calculating the scales for all reflections). The selection lists typically come from the Ih_table datastructures so that the data in the components is split in the same way as the data in the Ih_table datastructure. """ from __future__ import annotations from scitbx import sparse from dials.array_family import flex from dials_scaling_ext import calculate_harmonic_tables_from_selections class ScaleComponentBase: """ Base scale component class. This defines an interface to access the parameters, the component of the inverse scale factor and it's derivatives with respect to the parameters. Scale components derived from the base class are designed to be instantiated by a ScalingModel class, by supplying an initial array of parameters and optionally the current estimated standard deviations. The relevant data from a reflection table is added later by a Scaler using the update_reflection_data method. This behaviour allows data to easily be added/changed after selecting subsets of the data. """ def __init__(self, initial_values, parameter_esds=None): """Set the initial parameter values, parameter esds and n_params.""" self._parameters = initial_values self._parameter_esds = parameter_esds self._n_params = len(self._parameters) self._var_cov = None self._n_refl = [] # store as a list, to allow holding of data in blocks self._parameter_restraints = None self._data = {} @property def data(self): """ Return a dictionary of reflection data relevant to the particular component. This is designed to be a dict of arrays which can be selected from when updating the component (i.e. selecting subsets). 
""" return self._data @data.setter def data(self, data): self._data = data @data.deleter def data(self): self._data = {} @property def parameter_restraints(self): """Restraint weights for the component parameters.""" return self._parameter_restraints @parameter_restraints.setter def parameter_restraints(self, restraints): assert restraints.size() == self.parameters.size() self._parameter_restraints = restraints @property def n_params(self): """Get the number of parameters of the component (read-only).""" return self._n_params @property def parameters(self): """Parameters of the component.""" return self._parameters @parameters.setter def parameters(self, new_parameters): assert len(new_parameters) == len( self._parameters ), f""" attempting to set a new set of parameters of different length than previous assignment: was {len(self._parameters)}, attempting {len(new_parameters)}""" self._parameters = new_parameters @property def free_parameters(self): return self._parameters @free_parameters.setter def free_parameters(self, parameters): self._parameters = parameters @property def parameter_esds(self): """Return the estimated standard deviations of the parameters.""" return self._parameter_esds @parameter_esds.setter def parameter_esds(self, esds): assert len(esds) == len(self.parameters) self._parameter_esds = esds @property def free_parameter_esds(self): """Return the estimated standard deviations of the parameters.""" return self._parameter_esds @free_parameter_esds.setter def free_parameter_esds(self, esds): assert len(esds) == len(self.free_parameters) self._parameter_esds = esds def calculate_restraints(self): """Calculate residual and gradient restraints for the component.""" return None def calculate_jacobian_restraints(self): """Calculate residual and jacobian restraints for the component.""" return None @property def var_cov_matrix(self): """Return the variance-covariance matrix of the parameters.""" return self._var_cov @var_cov_matrix.setter def var_cov_matrix(self, var_cov): self._var_cov = var_cov @property def n_refl(self): """Return a list of the number of reflections in each block.""" return self._n_refl def update_reflection_data(self, selection=None, block_selections=None): """ Update the internal data arrays. Use the data stored in self.data, optionally with a selection array or list of selections, to populate a list of internal arrays e.g n_refl, normalised_values etc. to allow scale and derivative calculations. If no selection arrays are provided, the internal arrays will be lists containing one array/value, depending on the data type needed for derivative and scale calculation. Args: selection: A flex.bool selection array to select a subset of the internal data. block_selections (list): A list of flex.size_t arrays to select subsets of the internal data. """ raise NotImplementedError() def calculate_scales_and_derivatives(self, block_id=0): """Calculate and return inverse scales and derivatives for a given block.""" raise NotImplementedError() def calculate_scales(self, block_id=0): """Calculate and return inverse scales for a given block.""" raise NotImplementedError() class SingleScaleFactor(ScaleComponentBase): """ A model component consisting of a single global scale parameter. The inverse scale factor for every reflection is the parameter value itself and the derivatives are therefore all 1.0. 
""" null_parameter_value = 1.0 def __init__(self, initial_values, parameter_esds=None): """Set the initial parameter values, parameter esds and n_params.""" assert ( len(initial_values) == 1 ), """ This model component can only hold a single parameter.""" super().__init__(initial_values, parameter_esds) @ScaleComponentBase.data.setter def data(self, data): """Set the data dict in the parent class.""" assert set(data.keys()) == {"id"}, set(data.keys()) self._data = data def update_reflection_data(self, selection=None, block_selections=None): """ Update the internal n_refl list. Use the data stored in self.data, optionally with a boolean selection array or list of flex.size_t index selections, to make a list of n_refl (of length 1 or len(block_selections)) by inspecting the size of the selection result, in order to allow scale and derivative calculations. Args: selection: Optional, a flex.bool selection array to select a subset of the internal data. block_selections (list): Optional, a list of flex.size_t arrays to select subsets of the internal data. """ data = self.data["id"] if selection: self._n_refl = [data.select(selection).size()] elif block_selections: self._n_refl = [data.select(sel).size() for sel in block_selections] else: self._n_refl = [data.size()] def calculate_scales_and_derivatives(self, block_id=0): """Calculate and return inverse scales and derivatives for a given block.""" scales = flex.double(self.n_refl[block_id], self._parameters[0]) derivatives = sparse.matrix(self.n_refl[block_id], 1) for i in range(self.n_refl[block_id]): derivatives[i, 0] = 1.0 return scales, derivatives def calculate_scales(self, block_id=0): """Calculate and return inverse scales for a given block.""" return flex.double(self.n_refl[block_id], self._parameters[0]) class SingleBScaleFactor(ScaleComponentBase): """ A model component for a single global B-factor parameter. The inverse scale factor for each reflection is given by S = exp(B/(2 * d^2)), the derivatives are S/(2 * d^2). """ null_parameter_value = 0.0 def __init__(self, initial_values, parameter_esds=None): """Set the initial parameter values, parameter esds and n_params.""" super().__init__(initial_values, parameter_esds) self._d_values = [] @property def d_values(self): """Return a list of arrays of d-values associated with this component.""" return self._d_values @ScaleComponentBase.data.setter def data(self, data): """Set the data dict in the parent class.""" assert set(data.keys()) == {"d"}, set(data.keys()) self._data = data def update_reflection_data(self, selection=None, block_selections=None): """ Update the internal n_refl and d_values lists. Use the data stored in self.data, optionally with a boolean selection array or list of flex.size_t index selections, to make a lists of n_refl and d_value arrays (of length 1 or len(block_selections)), in order to allow scale and derivative calculations. Args: selection: Optional, a flex.bool selection array to select a subset of the internal data. block_selections (list): Optional, a list of flex.size_t arrays to select subsets of the internal data. 
""" data = self.data["d"] if selection: self._d_values = [data.select(selection)] elif block_selections: self._d_values = [data.select(sel) for sel in block_selections] else: self._d_values = [data] self._n_refl = [dvalues.size() for dvalues in self._d_values] def calculate_scales_and_derivatives(self, block_id=0): """Calculate and return inverse scales and derivatives for a given block.""" d_squared = self._d_values[block_id] * self._d_values[block_id] scales = flex.exp( flex.double(self._n_refl[block_id], self._parameters[0]) / (2.0 * d_squared) ) derivatives = sparse.matrix(self._n_refl[block_id], 1) for i in range(self._n_refl[block_id]): derivatives[i, 0] = scales[i] / (2.0 * d_squared[i]) return scales, derivatives def calculate_scales(self, block_id=0): """Calculate and return inverse scales for a given block.""" scales = flex.exp( flex.double(self._n_refl[block_id], self._parameters[0]) / (2.0 * (self._d_values[block_id] * self._d_values[block_id])) ) return scales class LinearDoseDecay(ScaleComponentBase): """ A model component for a decay that depends linearly on dose (see Holton Acta D 2019 D75 113-122) For the dose dependent, the form is I = I0 exp(-ln(2) D/ Hd). Parameterise this as linear function of rotation with an overall factor to refine. T(r) = exp(Cr/d - i.e. a one parameter model with the overall 'dose' proportional factor C. """ null_parameter_value = 0.0 def __init__(self, initial_values, parameter_esds=None): """Set the initial parameter values, parameter esds and n_params.""" super().__init__(initial_values, parameter_esds) self._d_values = [] self._x = [] # rotation/time @property def d_values(self): """Return a list of arrays of d-values associated with this component.""" return self._d_values @ScaleComponentBase.data.setter def data(self, data): """Set the data dict in the parent class.""" assert set(data.keys()) == {"d", "x"}, set(data.keys()) self._data = data def update_reflection_data(self, selection=None, block_selections=None): """ Update the internal n_refl and d_values lists. Use the data stored in self.data, optionally with a boolean selection array or list of flex.size_t index selections, to make a lists of n_refl and d_value arrays (of length 1 or len(block_selections)), in order to allow scale and derivative calculations. Args: selection: Optional, a flex.bool selection array to select a subset of the internal data. block_selections (list): Optional, a list of flex.size_t arrays to select subsets of the internal data. 
""" d = self.data["d"] x = self.data["x"] if selection: self._d_values = [d.select(selection)] self._x = [x.select(selection)] elif block_selections: self._d_values = [d.select(sel) for sel in block_selections] self._x = [x.select(sel) for sel in block_selections] else: self._d_values = [d] self._x = [x] self._n_refl = [dvalues.size() for dvalues in self._d_values] def calculate_scales_and_derivatives(self, block_id=0): """Calculate and return inverse scales and derivatives for a given block.""" scales = flex.exp( self._parameters[0] * self._x[block_id] / self._d_values[block_id] ) derivatives = sparse.matrix(self._n_refl[block_id], 1) for i in range(self._n_refl[block_id]): derivatives[i, 0] = scales[i] * ( self._x[block_id][i] / self._d_values[block_id][i] ) return scales, derivatives def calculate_scales(self, block_id=0): """Calculate and return inverse scales for a given block.""" scales = flex.exp( self._parameters[0] * self._x[block_id] / self._d_values[block_id] ) return scales class QuadraticDoseDecay(LinearDoseDecay): """ A model component for a decay that depends quadratically on dose For the dose dependent, the form is I = I0 exp(-C D/ d^2). Parameterise this as linear function of rotation with an overall factor to refine. T(r) = exp(Cr/d^2) - i.e. a one parameter model with the overall 'dose' proportional factor C. """ def calculate_scales_and_derivatives(self, block_id=0): """Calculate and return inverse scales and derivatives for a given block.""" scales = flex.exp( self._parameters[0] * self._x[block_id] / (self._d_values[block_id] ** 2) ) derivatives = sparse.matrix(self._n_refl[block_id], 1) for i in range(self._n_refl[block_id]): derivatives[i, 0] = scales[i] * ( self._x[block_id][i] / (self._d_values[block_id][i] ** 2) ) return scales, derivatives def calculate_scales(self, block_id=0): """Calculate and return inverse scales for a given block.""" scales = flex.exp( self._parameters[0] * self._x[block_id] / (self._d_values[block_id] ** 2) ) return scales class SHScaleComponent(ScaleComponentBase): """ A model component for a spherical harmonic absorption correction. This component uses a set of spherical harmonic functions to define an absorption surface for the crystal. A matrix of spherical harmonic coefficients for the data is stored in self._harmonic_values and is used to calculate the scales and derivatives. The scale is given by S = 1 + (sum_l sum_m Clm * Ylm) where Clm are the model parameters and Ylm are the spherical harmonic coefficients, the derivatives are then simply the coefficients Ylm. 
""" null_parameter_value = 0.0 coefficients_list = None # shared class variable to reduce memory load def __init__(self, initial_values, parameter_esds=None): """Set the initial parameter values, parameter esds and n_params.""" super().__init__(initial_values, parameter_esds) self._harmonic_values = [] self._matrices = [] @property def harmonic_values(self): """Return the matrix of harmonic coefficients for the internal data.""" return self._harmonic_values @property def sph_harm_table(self): """Return the matrix of the full harmonic coefficient for a reflection table.""" return self._data["sph_harm_table"] @sph_harm_table.setter def sph_harm_table(self, sht): self._data["sph_harm_table"] = sht @ScaleComponentBase.data.setter def data(self, data): """Set the data dict in the parent class.""" try: assert set(data.keys()) == {"s1_lookup", "s0_lookup"}, set(data.keys()) self._mode = "memory" except AssertionError: assert set(data.keys()) == {"sph_harm_table"}, set(data.keys()) self._mode = "speed" # Note: only speedier for small datasets self._data = data def calculate_restraints(self): """Calculate residual and gradient restraints for the component.""" residual = self.parameter_restraints * self._parameters * self._parameters gradient = 2.0 * self.parameter_restraints * self._parameters return residual, gradient def calculate_jacobian_restraints(self): """Calculate residual and jacobian restraints for the component.""" jacobian = sparse.matrix(self.n_params, self.n_params) for i in range(self.n_params): jacobian[i, i] = 1.0 return self._parameters, jacobian, self._parameter_restraints def update_reflection_data(self, selection=None, block_selections=None): """ Update the internal n_refl and harmonic_values lists. Use the harmonic values matrix stored in self.data, optionally with a boolean selection array or list of flex.size_t index selections, to make lists of n_refl and harmonic_value arrays (of length 1 or len(block_selections)), in order to allow scale and derivative calculations. Args: selection: Optional, a flex.bool selection array to select a subset of the internal data. block_selections (list): Optional, a list of flex.size_t arrays to select subsets of the internal data. """ if self._mode == "speed": self._update_reflection_data_speedmode(selection, block_selections) elif self._mode == "memory": self._update_reflection_data_memorymode(selection, block_selections) else: raise ValueError def _update_reflection_data_memorymode(self, selection=None, block_selections=None): if len(self.coefficients_list) != self.n_params: self.coefficients_list = self.coefficients_list[0 : self.n_params] # modify only for this instance, only needs to be done once per instance. 
if selection: n0 = self.data["s0_lookup"].select(selection) n1 = self.data["s1_lookup"].select(selection) values, matrix = calculate_harmonic_tables_from_selections( n0, n1, self.coefficients_list ) self._harmonic_values = [values] self._matrices = [matrix] elif block_selections: self._harmonic_values = [] self._matrices = [] for sel in block_selections: n0 = self.data["s0_lookup"].select(sel) n1 = self.data["s1_lookup"].select(sel) values, matrix = calculate_harmonic_tables_from_selections( n0, n1, self.coefficients_list ) self._harmonic_values.append(values) self._matrices.append(matrix) else: n0 = self.data["s0_lookup"] n1 = self.data["s1_lookup"] values, matrix = calculate_harmonic_tables_from_selections( n0, n1, self.coefficients_list ) self._harmonic_values = [values] self._matrices = [matrix] self._n_refl = [val[0].size() for val in self._harmonic_values] def _update_reflection_data_speedmode(self, selection=None, block_selections=None): if selection: sel_sph_harm_table = self.data["sph_harm_table"].select_columns( selection.iselection() ) self._harmonic_values = [sel_sph_harm_table.transpose()] elif block_selections: self._harmonic_values = [] for sel in block_selections: block_sph_harm_table = self.data["sph_harm_table"].select_columns(sel) self._harmonic_values.append(block_sph_harm_table.transpose()) else: self._harmonic_values = [self.data["sph_harm_table"].transpose()] self._n_refl = [val.n_rows for val in self._harmonic_values] def calculate_scales(self, block_id=0): """Calculate and return inverse scales for a given block.""" if self._mode == "speed": return self._calculate_scales_and_derivatives_speedmode( block_id, derivatives=False ) elif self._mode == "memory": return self._calculate_scales_and_derivatives_memorymode( block_id, derivatives=False ) def calculate_scales_and_derivatives(self, block_id=0): """Calculate and return inverse scales and derivatives for a given block.""" if self._mode == "speed": return self._calculate_scales_and_derivatives_speedmode(block_id) elif self._mode == "memory": return self._calculate_scales_and_derivatives_memorymode(block_id) def _calculate_scales_and_derivatives_speedmode(self, block_id, derivatives=True): abs_scale = flex.double( self._harmonic_values[block_id].n_rows, 1.0 ) # Unity term for i, col in enumerate(self._harmonic_values[block_id].cols()): abs_scale += flex.double(col.as_dense_vector() * self._parameters[i]) if derivatives: return abs_scale, self._harmonic_values[block_id] return abs_scale def _calculate_scales_and_derivatives_memorymode(self, block_id, derivatives=True): abs_scale = flex.double( self._harmonic_values[block_id][0].size(), 1.0 ) # Unity term for i, arr in enumerate( self._harmonic_values[block_id] ): # iterate over a list of arrays abs_scale += arr * self._parameters[i] if derivatives: return abs_scale, self._matrices[block_id] return abs_scale
dials/dials
algorithms/scaling/model/components/scale_components.py
Python
bsd-3-clause
23,435
[ "CRYSTAL" ]
0e6c71006df2d19f38d20f3ef6436a758857f91d7862bb1609210c3f4c292373
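A minimal usage sketch for the SingleBScaleFactor component defined in the file above, assuming a working dials/cctbx installation that provides dials.array_family.flex; the d-spacing values are illustrative placeholders and the import path follows the file location recorded above. This is not part of the original module.

# Minimal sketch (assumes dials/cctbx is installed; values are illustrative).
from dials.array_family import flex
from dials.algorithms.scaling.model.components.scale_components import SingleBScaleFactor

component = SingleBScaleFactor(flex.double([0.0]))       # single B-factor parameter, B = 0.0
component.data = {"d": flex.double([1.2, 1.5, 2.0])}     # d-values for three reflections
component.update_reflection_data()                       # one block covering all reflections
scales, derivatives = component.calculate_scales_and_derivatives(block_id=0)
# scales[i] = exp(B / (2 * d_i^2)); with B = 0.0 every scale is 1.0
print(list(scales))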
#!/usr/bin/env python ######################################################################## # File : dirac-wms-job-attributes # Author : Stuart Paterson ######################################################################## """ Retrieve attributes associated with the given DIRAC job """ from __future__ import print_function, absolute_import __RCSID__ = "$Id$" import DIRAC from DIRAC.Core.Base import Script Script.setUsageMessage('\n'.join([__doc__.split('\n')[1], 'Usage:', ' %s [option|cfgfile] ... JobID ...' % Script.scriptName, 'Arguments:', ' JobID: DIRAC Job ID'])) Script.parseCommandLine(ignoreErrors=True) args = Script.getPositionalArgs() if len(args) < 1: Script.showHelp() from DIRAC.Interfaces.API.Dirac import Dirac, parseArguments dirac = Dirac() exitCode = 0 errorList = [] for job in parseArguments(args): result = dirac.getJobAttributes(job, printOutput=True) if not result['OK']: errorList.append((job, result['Message'])) exitCode = 2 for error in errorList: print("ERROR %s: %s" % error) DIRAC.exit(exitCode)
fstagni/DIRAC
Interfaces/scripts/dirac-wms-job-attributes.py
Python
gpl-3.0
1,211
[ "DIRAC" ]
075142046c2ff52e7653ac022fe377ce4839f3bb7367f7abd4cf2b4b106a44c8
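The same attribute lookup can be performed directly from Python rather than through the command-line wrapper above; the following sketch assumes a configured DIRAC client environment and uses a placeholder job ID. It only uses calls already present in the script.

# Hypothetical direct API use, mirroring the script above (requires a configured DIRAC client).
from DIRAC.Core.Base import Script
Script.parseCommandLine(ignoreErrors=True)   # initialise the DIRAC configuration before using the API

from DIRAC.Interfaces.API.Dirac import Dirac

dirac = Dirac()
job_id = 12345                               # placeholder job ID
result = dirac.getJobAttributes(job_id, printOutput=True)
if not result['OK']:
    print("ERROR %s: %s" % (job_id, result['Message']))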
from __future__ import print_function import json import os import os.path import re import sys import warnings from collections import defaultdict from distutils.command.build_scripts import build_scripts as BuildScripts from distutils.command.sdist import sdist as SDist try: from setuptools import setup, find_packages from setuptools.command.build_py import build_py as BuildPy from setuptools.command.install_lib import install_lib as InstallLib from setuptools.command.install_scripts import install_scripts as InstallScripts except ImportError: print("Ansible now needs setuptools in order to build. Install it using" " your package manager (usually python-setuptools) or via pip (pip" " install setuptools).", file=sys.stderr) sys.exit(1) sys.path.insert(0, os.path.abspath('lib')) from ansible.release import __version__, __author__ SYMLINK_CACHE = 'SYMLINK_CACHE.json' def _find_symlinks(topdir, extension=''): """Find symlinks that should be maintained Maintained symlinks exist in the bin dir or are modules which have aliases. Our heuristic is that they are a link in a certain path which point to a file in the same directory. """ symlinks = defaultdict(list) for base_path, dirs, files in os.walk(topdir): for filename in files: filepath = os.path.join(base_path, filename) if os.path.islink(filepath) and filename.endswith(extension): target = os.readlink(filepath) if os.path.dirname(target) == '': link = filepath[len(topdir):] if link.startswith('/'): link = link[1:] symlinks[os.path.basename(target)].append(link) return symlinks def _cache_symlinks(symlink_data): with open(SYMLINK_CACHE, 'w') as f: json.dump(symlink_data, f) def _maintain_symlinks(symlink_type, base_path): """Switch a real file into a symlink""" try: # Try the cache first because going from git checkout to sdist is the # only time we know that we're going to cache correctly with open(SYMLINK_CACHE, 'r') as f: symlink_data = json.load(f) except (IOError, OSError) as e: # IOError on py2, OSError on py3. Both have errno if e.errno == 2: # SYMLINKS_CACHE doesn't exist. Fallback to trying to create the # cache now. Will work if we're running directly from a git # checkout or from an sdist created earlier. symlink_data = {'script': _find_symlinks('bin'), 'library': _find_symlinks('lib', '.py'), } # Sanity check that something we know should be a symlink was # found. We'll take that to mean that the current directory # structure properly reflects symlinks in the git repo if 'ansible-playbook' in symlink_data['script']['ansible']: _cache_symlinks(symlink_data) else: raise RuntimeError( "Pregenerated symlink list was not present and expected " "symlinks in ./bin were missing or broken. " "Perhaps this isn't a git checkout?" 
) else: raise symlinks = symlink_data[symlink_type] for source in symlinks: for dest in symlinks[source]: dest_path = os.path.join(base_path, dest) if not os.path.islink(dest_path): try: os.unlink(dest_path) except OSError as e: if e.errno == 2: # File does not exist which is all we wanted pass os.symlink(source, dest_path) class BuildPyCommand(BuildPy): def run(self): BuildPy.run(self) _maintain_symlinks('library', self.build_lib) class BuildScriptsCommand(BuildScripts): def run(self): BuildScripts.run(self) _maintain_symlinks('script', self.build_dir) class InstallLibCommand(InstallLib): def run(self): InstallLib.run(self) _maintain_symlinks('library', self.install_dir) class InstallScriptsCommand(InstallScripts): def run(self): InstallScripts.run(self) _maintain_symlinks('script', self.install_dir) class SDistCommand(SDist): def run(self): # have to generate the cache of symlinks for release as sdist is the # only command that has access to symlinks from the git repo symlinks = {'script': _find_symlinks('bin'), 'library': _find_symlinks('lib', '.py'), } _cache_symlinks(symlinks) SDist.run(self) # Print warnings at the end because no one will see warnings before all the normal status # output if os.environ.get('_ANSIBLE_SDIST_FROM_MAKEFILE', False) != '1': warnings.warn('When setup.py sdist is run from outside of the Makefile,' ' the generated tarball may be incomplete. Use `make snapshot`' ' to create a tarball from an arbitrary checkout or use' ' `cd packaging/release && make release version=[..]` for official builds.', RuntimeWarning) def read_file(file_name): """Read file and return its contents.""" with open(file_name, 'r') as f: return f.read() def read_requirements(file_name): """Read requirements file as a list.""" reqs = read_file(file_name).splitlines() if not reqs: raise RuntimeError( "Unable to read requirements from the %s file" "That indicates this copy of the source code is incomplete." % file_name ) return reqs PYCRYPTO_DIST = 'pycrypto' def get_crypto_req(): """Detect custom crypto from ANSIBLE_CRYPTO_BACKEND env var. pycrypto or cryptography. We choose a default but allow the user to override it. This translates into pip install of the sdist deciding what package to install and also the runtime dependencies that pkg_resources knows about. 
""" crypto_backend = os.environ.get('ANSIBLE_CRYPTO_BACKEND', '').strip() if crypto_backend == PYCRYPTO_DIST: # Attempt to set version requirements return '%s >= 2.6' % PYCRYPTO_DIST return crypto_backend or None def substitute_crypto_to_req(req): """Replace crypto requirements if customized.""" crypto_backend = get_crypto_req() if crypto_backend is None: return req def is_not_crypto(r): CRYPTO_LIBS = PYCRYPTO_DIST, 'cryptography' return not any(r.lower().startswith(c) for c in CRYPTO_LIBS) return [r for r in req if is_not_crypto(r)] + [crypto_backend] def read_extras(): """Specify any extra requirements for installation.""" extras = dict() extra_requirements_dir = 'packaging/requirements' for extra_requirements_filename in os.listdir(extra_requirements_dir): filename_match = re.search(r'^requirements-(\w*).txt$', extra_requirements_filename) if not filename_match: continue extra_req_file_path = os.path.join(extra_requirements_dir, extra_requirements_filename) try: extras[filename_match.group(1)] = read_file(extra_req_file_path).splitlines() except RuntimeError: pass return extras def get_dynamic_setup_params(): """Add dynamically calculated setup params to static ones.""" return { # Retrieve the long description from the README 'long_description': read_file('README.rst'), 'install_requires': substitute_crypto_to_req( read_requirements('requirements.txt'), ), 'extras_require': read_extras(), } static_setup_params = dict( # Use the distutils SDist so that symlinks are not expanded # Use a custom Build for the same reason cmdclass={ 'build_py': BuildPyCommand, 'build_scripts': BuildScriptsCommand, 'install_lib': InstallLibCommand, 'install_scripts': InstallScriptsCommand, 'sdist': SDistCommand, }, name='ansible', version=__version__, description='Radically simple IT automation', author=__author__, author_email='info@ansible.com', url='https://ansible.com/', project_urls={ 'Bug Tracker': 'https://github.com/ansible/ansible/issues', 'CI: Shippable': 'https://app.shippable.com/github/ansible/ansible', 'Code of Conduct': 'https://docs.ansible.com/ansible/latest/community/code_of_conduct.html', 'Documentation': 'https://docs.ansible.com/ansible/', 'Mailing lists': 'https://docs.ansible.com/ansible/latest/community/communication.html#mailing-list-information', 'Source Code': 'https://github.com/ansible/ansible', }, license='GPLv3+', # Ansible will also make use of a system copy of python-six and # python-selectors2 if installed but use a Bundled copy if it's not. 
python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*', package_dir={'': 'lib'}, packages=find_packages('lib'), package_data={ '': [ 'executor/powershell/*.ps1', 'module_utils/csharp/*.cs', 'module_utils/csharp/*/*.cs', 'module_utils/powershell/*.psm1', 'module_utils/powershell/*/*.psm1', 'modules/windows/*.ps1', 'modules/windows/*/*.ps1', 'galaxy/data/*/*.*', 'galaxy/data/*/.*', 'galaxy/data/*/*/.*', 'galaxy/data/*/*/*.*', 'galaxy/data/*/tests/inventory', 'config/base.yml', 'config/module_defaults.yml', ], }, classifiers=[ 'Development Status :: 5 - Production/Stable', 'Environment :: Console', 'Intended Audience :: Developers', 'Intended Audience :: Information Technology', 'Intended Audience :: System Administrators', 'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)', 'Natural Language :: English', 'Operating System :: POSIX', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Topic :: System :: Installation/Setup', 'Topic :: System :: Systems Administration', 'Topic :: Utilities', ], scripts=[ 'bin/ansible', 'bin/ansible-playbook', 'bin/ansible-pull', 'bin/ansible-doc', 'bin/ansible-galaxy', 'bin/ansible-console', 'bin/ansible-connection', 'bin/ansible-vault', 'bin/ansible-config', 'bin/ansible-inventory', ], data_files=[], # Installing as zip files would break due to references to __file__ zip_safe=False ) def main(): """Invoke installation process using setuptools.""" setup_params = dict(static_setup_params, **get_dynamic_setup_params()) ignore_warning_regex = ( r"Unknown distribution option: '(project_urls|python_requires)'" ) warnings.filterwarnings( 'ignore', message=ignore_warning_regex, category=UserWarning, module='distutils.dist', ) setup(**setup_params) warnings.resetwarnings() if __name__ == '__main__': main()
pgmillon/ansible
setup.py
Python
gpl-3.0
11,653
[ "Galaxy" ]
8affad23426a58a170a350dac53dbc48e60bb2add43e9175d5548fa849bfb71e
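To make the symlink heuristic in _find_symlinks concrete, here is a small self-contained sketch of the same idea, run against a throwaway directory rather than the Ansible tree; it is not part of setup.py and only assumes a POSIX filesystem where symlinks can be created.

# Standalone illustration of the "target in the same directory" heuristic used above.
import os
import tempfile
from collections import defaultdict

def find_same_dir_symlinks(topdir):
    symlinks = defaultdict(list)
    for base_path, dirs, files in os.walk(topdir):
        for filename in files:
            filepath = os.path.join(base_path, filename)
            if os.path.islink(filepath):
                target = os.readlink(filepath)
                if os.path.dirname(target) == '':             # link points at a sibling file
                    link = os.path.relpath(filepath, topdir)
                    symlinks[os.path.basename(target)].append(link)
    return dict(symlinks)

tmp = tempfile.mkdtemp()
open(os.path.join(tmp, 'ansible'), 'w').close()               # the real file
os.symlink('ansible', os.path.join(tmp, 'ansible-playbook'))  # sibling symlink, gets recorded
print(find_same_dir_symlinks(tmp))                            # {'ansible': ['ansible-playbook']}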
# This file is part of the exercise code repository accompanying # the book: Neuronal Dynamics (see http://neuronaldynamics.epfl.ch) # located at http://github.com/EPFL-LCN/neuronaldynamics-exercises. # This free software: you can redistribute it and/or modify it under # the terms of the GNU General Public License 2.0 as published by the # Free Software Foundation. You should have received a copy of the # GNU General Public License along with the repository. If not, # see http://www.gnu.org/licenses/. # Should you reuse and publish the code for your own purposes, # please cite the book or point to the webpage http://neuronaldynamics.epfl.ch. # Wulfram Gerstner, Werner M. Kistler, Richard Naud, and Liam Paninski. # Neuronal Dynamics: From Single Neurons to Networks and Models of Cognition. # Cambridge University Press, 2014. import brian2 as b2 import numpy as np import matplotlib.pyplot as plt import math import numpy ############### # Input Currents ############### def get_step_current(t_start, t_end, unit_time, amplitude, append_zero=True): """Creates a step current. If t_start == t_end, then a single entry in the values array is set to amplitude. Args: t_start (int): start of the step t_end (int): end of the step unit_time (Brian2 unit): unit of t_start and t_end. e.g. 0.1*brian2.ms amplitude (Quantity): amplitude of the step. e.g. 3.5*brian2.uamp append_zero (bool, optional): if true, 0Amp is appended at t_end+1. Without that trailing 0, Brian reads out the last value in the array (=amplitude) for all indices > t_end. Returns: TimedArray: Brian2.TimedArray """ assert isinstance(t_start, int), "t_start_ms must be of type int" assert isinstance(t_end, int), "t_end must be of type int" assert b2.units.fundamentalunits.have_same_dimensions(amplitude, b2.amp), \ "amplitude must have the dimension of current e.g. brian2.uamp" tmp_size = 1 + t_end # +1 for t=0 if append_zero: tmp_size += 1 tmp = np.zeros((tmp_size, 1)) * b2.amp tmp[t_start: t_end + 1, 0] = amplitude curr = b2.TimedArray(tmp, dt=1. * unit_time) return curr def get_ramp_current(t_start, t_end, unit_time, amplitude_start, amplitude_end, append_zero=True): """Creates a ramp current. If t_start == t_end, then ALL entries are 0. Args: t_start (int): start of the ramp t_end (int): end of the ramp unit_time (Brian2 unit): unit of t_start and t_end. e.g. 0.1*brian2.ms amplitude_start (Quantity): amplitude of the ramp at t_start. e.g. 3.5*brian2.uamp amplitude_end (Quantity): amplitude of the ramp at t_end. e.g. 4.5*brian2.uamp append_zero (bool, optional): if true, 0Amp is appended at t_end+1. Without that trailing 0, Brian reads out the last value in the array (=amplitude_end) for all indices > t_end. Returns: TimedArray: Brian2.TimedArray """ assert isinstance(t_start, int), "t_start_ms must be of type int" assert isinstance(t_end, int), "t_end must be of type int" assert b2.units.fundamentalunits.have_same_dimensions(amplitude_start, b2.amp), \ "amplitude must have the dimension of current e.g. brian2.uamp" assert b2.units.fundamentalunits.have_same_dimensions(amplitude_end, b2.amp), \ "amplitude must have the dimension of current e.g. brian2.uamp" tmp_size = 1 + t_end # +1 for t=0 if append_zero: tmp_size += 1 tmp = np.zeros((tmp_size, 1)) * b2.amp if t_end > t_start: # if deltaT is zero, we return a zero current slope = (amplitude_end - amplitude_start) / float((t_end - t_start)) ramp = [amplitude_start + t * slope for t in range(0, (t_end - t_start) + 1)] tmp[t_start: t_end + 1, 0] = ramp curr = b2.TimedArray(tmp, dt=1. 
* unit_time) return curr def get_sinusoidal_current(t_start, t_end, unit_time, amplitude, frequency, direct_current, phase_offset=0., append_zero=True): """Creates a sinusoidal current. If t_start == t_end, then ALL entries are 0. Args: t_start (int): start of the sine wave t_end (int): end of the sine wave unit_time (Quantity, Time): unit of t_start and t_end. e.g. 0.1*brian2.ms amplitude (Quantity, Current): maximum amplitude of the sinus e.g. 3.5*brian2.uamp frequency (Quantity, Hz): Frequency of the sine. e.g. 0.5*brian2.kHz direct_current(Quantity, Current): DC-component (=offset) of the current phase_offset (float, Optional): phase at t_start. Default = 0. append_zero (bool, optional): if true, 0Amp is appended at t_end+1. Without that trailing 0, Brian reads out the last value in the array for all indices > t_end. Returns: TimedArray: Brian2.TimedArray """ assert isinstance(t_start, int), "t_start_ms must be of type int" assert isinstance(t_end, int), "t_end must be of type int" assert b2.units.fundamentalunits.have_same_dimensions(amplitude, b2.amp), \ "amplitude must have the dimension of current. e.g. brian2.uamp" assert b2.units.fundamentalunits.have_same_dimensions(direct_current, b2.amp), \ "direct_current must have the dimension of current. e.g. brian2.uamp" assert b2.units.fundamentalunits.have_same_dimensions(frequency, b2.Hz), \ "frequency must have the dimension of 1/Time. e.g. brian2.Hz" tmp_size = 1 + t_end # +1 for t=0 if append_zero: tmp_size += 1 tmp = np.zeros((tmp_size, 1)) * b2.amp if t_end > t_start: # if deltaT is zero, we return a zero current phi = range(0, (t_end - t_start) + 1) phi = phi * unit_time * frequency phi = phi * 2. * math.pi + phase_offset c = numpy.sin(phi) c = (direct_current + c * amplitude) tmp[t_start: t_end + 1, 0] = c curr = b2.TimedArray(tmp, dt=1. * unit_time) return curr def get_zero_current(): """ Returns a TimedArray with one entry: 0 Amp Returns: TimedArray """ return get_step_current(0, 0, b2.ms, 0 * b2.amp, append_zero=False) def get_spikes_current(t_spikes, unit_time, amplitude, append_zero=True): """Creates a two dimensional TimedArray wich has one column for each value in t_spikes. All values in each column are 0 except one, the spike time as specified in t_spikes is set to amplitude. Note: This function is provided to easily insert pulse currents into a cable. For other use of spike input, search the Brian2 documentation for SpikeGeneration. Args: t_spikes (int): list of spike times unit_time (Quantity, Time): unit of t_spikes . e.g. 1*brian2.ms amplitude (Quantity, Current): amplitude of the spike. All spikes have the sampe amplitude append_zero (bool, optional): if true, 0Amp is appended at t_end+1. Without that trailing 0, Brian reads out the last value in the array for all indices > t_end. Returns: TimedArray: Brian2.TimedArray """ assert isinstance(t_spikes, list), "t_spikes must be of type list" assert b2.units.fundamentalunits.have_same_dimensions(amplitude, b2.amp), \ "amplitude must have the dimension of current e.g. brian2.uamp" nr_spikes = len(t_spikes) t_max = max(t_spikes) tmp_size = 1 + t_max # +1 for t=0 if append_zero: tmp_size += 1 tmp = np.zeros((tmp_size, nr_spikes)) * b2.amp for i in range(nr_spikes): tmp[t_spikes[i], i] = amplitude curr = b2.TimedArray(tmp, dt=1. * unit_time) return curr def plot_step_current_example(): """ Example for get_step_current. """ current = get_step_current(10, 30, b2.ms, amplitude=5. 
* b2.uA, append_zero=False) data = current.values[:, 0] / b2.uamp plt.plot(data, "ro", markersize=10, fillstyle="full") plt.title("get_step_current(10, 30, b2.ms, amplitude=5.*b2.uA, append_zero=False") plt.xlabel("index") plt.ylabel("value") def plot_ramp_current_example(): """ Example for get_ramp_current """ current = get_ramp_current(10, 25, b2.ms, 20 * b2.uamp, 50 * b2.uamp, append_zero=True) data = current.values[:, 0] / b2.uamp plt.plot(data, lw=3) plt.title("get_ramp_current(10, 25, b2.ms, 20*b2.uamp, 50*b2.uamp, append_zero=True)") plt.xlabel("index") plt.ylabel("value") def plot_sinusoidal_current_example(): """ Example for get_sinusoidal_current """ current = get_sinusoidal_current( 100, 1100, b2.us, amplitude=2 * b2.uamp, frequency=1.5 * b2.kHz, direct_current=1.5 * b2.uamp, phase_offset=math.pi / 6, append_zero=False) data = current.values[:, 0] / b2.uamp plt.plot(data, lw=3) plt.title("get_sinusoidal_current(100, 1100, b2.us, amplitude=2*b2.uamp, frequency=1.5*b2.kHz, \n" "direct_current=1.5*b2.uamp, phase_offset=pi/6, append_zero=False)") plt.xlabel("index") plt.ylabel("value") def getting_started(): # plot examples plot_step_current_example() plt.show() plot_ramp_current_example() plt.show() plot_sinusoidal_current_example() plt.show() if __name__ == "__main__": getting_started()
EPFL-LCN/neuronaldynamics-exercises
neurodynex3/tools/input_factory.py
Python
gpl-2.0
9,305
[ "Brian" ]
41c1b7aa8a389c39a64e1a84160b7145cf4d17eac925c90876ffdd0ae293f409
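In addition to the plotting helpers above, the generated TimedArray can be inspected directly through its values attribute, as the plot functions themselves do; the short sketch below is not part of the module and assumes Brian2 is installed and the module is importable under the path recorded above (neurodynex3.tools.input_factory).

# Quick check of get_step_current, mirroring the plotting helpers above (requires brian2 and neurodynex3).
import brian2 as b2
from neurodynex3.tools.input_factory import get_step_current

current = get_step_current(t_start=2, t_end=5, unit_time=b2.ms, amplitude=1.5 * b2.uamp)
print(current.values[:, 0] / b2.uamp)
# -> approximately [0. 0. 1.5 1.5 1.5 1.5 0.]  (indices 2..5 hold the step, plus the appended trailing zero)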
# # Gramps - a GTK+/GNOME based genealogy program # # Copyright (C) 2002-2007 Donald N. Allingham # Copyright (C) 2007-2008 Brian G. Matherly # Copyright (C) 2011 Tim G L Lyons # Copyright (C) 2011 Doug Blank <doug.blank@gmail.com> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # # $Id$ """ Package providing filter rules for GRAMPS. """ from ._disconnected import Disconnected from ._everyone import Everyone from ._familywithincompleteevent import FamilyWithIncompleteEvent from ._hasaddress import HasAddress from ._hasalternatename import HasAlternateName from ._hasassociation import HasAssociation from ._hasattribute import HasAttribute from ._hasbirth import HasBirth from ._hascitation import HasCitation from ._hascommonancestorwith import HasCommonAncestorWith from ._hascommonancestorwithfiltermatch import HasCommonAncestorWithFilterMatch from ._hasdeath import HasDeath from ._hasevent import HasEvent from ._hasfamilyattribute import HasFamilyAttribute from ._hasfamilyevent import HasFamilyEvent from ._hasgallery import HavePhotos from ._hasidof import HasIdOf from ._haslds import HasLDS from ._hasnameof import HasNameOf from ._hasnameorigintype import HasNameOriginType from ._hasnametype import HasNameType from ._hasnickname import HasNickname from ._hasnote import HasNote from ._hasnotematchingsubstringof import HasNoteMatchingSubstringOf from ._hasnoteregexp import HasNoteRegexp from ._hasrelationship import HasRelationship from ._hassourcecount import HasSourceCount from ._hassourceof import HasSourceOf from ._hastag import HasTag from ._hastextmatchingregexpof import HasTextMatchingRegexpOf from ._hastextmatchingsubstringof import HasTextMatchingSubstringOf from ._hasunknowngender import HasUnknownGender from ._havealtfamilies import HaveAltFamilies from ._havechildren import HaveChildren from ._incompletenames import IncompleteNames from ._isancestorof import IsAncestorOf from ._isancestoroffiltermatch import IsAncestorOfFilterMatch from ._isbookmarked import IsBookmarked from ._ischildoffiltermatch import IsChildOfFilterMatch from ._isdefaultperson import IsDefaultPerson from ._isdescendantfamilyof import IsDescendantFamilyOf from ._isdescendantfamilyoffiltermatch import IsDescendantFamilyOfFilterMatch from ._isdescendantof import IsDescendantOf from ._isdescendantoffiltermatch import IsDescendantOfFilterMatch from ._isduplicatedancestorof import IsDuplicatedAncestorOf from ._isfemale import IsFemale from ._islessthannthgenerationancestorof import \ IsLessThanNthGenerationAncestorOf from ._islessthannthgenerationancestorofbookmarked import \ IsLessThanNthGenerationAncestorOfBookmarked from ._islessthannthgenerationancestorofdefaultperson import \ IsLessThanNthGenerationAncestorOfDefaultPerson from ._islessthannthgenerationdescendantof import \ IsLessThanNthGenerationDescendantOf from ._ismale import IsMale from 
._ismorethannthgenerationancestorof import \ IsMoreThanNthGenerationAncestorOf from ._ismorethannthgenerationdescendantof import \ IsMoreThanNthGenerationDescendantOf from ._isparentoffiltermatch import IsParentOfFilterMatch from ._issiblingoffiltermatch import IsSiblingOfFilterMatch from ._isspouseoffiltermatch import IsSpouseOfFilterMatch from ._iswitness import IsWitness from ._matchesfilter import MatchesFilter from ._matcheseventfilter import MatchesEventFilter from ._matchessourceconfidence import MatchesSourceConfidence from ._missingparent import MissingParent from ._multiplemarriages import MultipleMarriages from ._nevermarried import NeverMarried from ._nobirthdate import NoBirthdate from ._nodeathdate import NoDeathdate from ._peopleprivate import PeoplePrivate from ._peoplepublic import PeoplePublic from ._personwithincompleteevent import PersonWithIncompleteEvent from ._probablyalive import ProbablyAlive from ._relationshippathbetween import RelationshipPathBetween from ._deeprelationshippathbetween import DeepRelationshipPathBetween from ._relationshippathbetweenbookmarks import RelationshipPathBetweenBookmarks from ._searchname import SearchName from ._regexpname import RegExpName from ._matchidof import MatchIdOf from ._regexpidof import RegExpIdOf from ._changedsince import ChangedSince from ._isrelatedwith import IsRelatedWith #------------------------------------------------------------------------- # # This is used by Custom Filter Editor tool # #------------------------------------------------------------------------- editor_rule_list = [ Everyone, IsFemale, HasUnknownGender, IsMale, IsDefaultPerson, IsBookmarked, HasAlternateName, HasAddress, HasAssociation, HasIdOf, HasLDS, HasNameOf, HasNameOriginType, HasNameType, HasNickname, HasRelationship, HasDeath, HasBirth, HasCitation, HasEvent, HasFamilyEvent, HasAttribute, HasFamilyAttribute, HasTag, HasSourceCount, HasSourceOf, HaveAltFamilies, HavePhotos, HaveChildren, IncompleteNames, NeverMarried, MultipleMarriages, NoBirthdate, NoDeathdate, PersonWithIncompleteEvent, FamilyWithIncompleteEvent, ProbablyAlive, PeoplePrivate, PeoplePublic, IsWitness, IsDescendantOf, IsDescendantFamilyOf, IsDescendantFamilyOfFilterMatch, IsLessThanNthGenerationAncestorOfDefaultPerson, IsDescendantOfFilterMatch, IsDuplicatedAncestorOf, IsLessThanNthGenerationDescendantOf, IsMoreThanNthGenerationDescendantOf, IsAncestorOf, IsAncestorOfFilterMatch, IsLessThanNthGenerationAncestorOf, IsLessThanNthGenerationAncestorOfBookmarked, IsMoreThanNthGenerationAncestorOf, HasCommonAncestorWith, HasCommonAncestorWithFilterMatch, MatchesFilter, MatchesEventFilter, MatchesSourceConfidence, MissingParent, IsChildOfFilterMatch, IsParentOfFilterMatch, IsSpouseOfFilterMatch, IsSiblingOfFilterMatch, RelationshipPathBetween, DeepRelationshipPathBetween, RelationshipPathBetweenBookmarks, HasTextMatchingSubstringOf, HasNote, HasNoteRegexp, HasNoteMatchingSubstringOf, RegExpIdOf, Disconnected, ChangedSince, IsRelatedWith, ]
Forage/Gramps
gramps/gen/filters/rules/person/__init__.py
Python
gpl-2.0
6,878
[ "Brian" ]
1738e92a9f58cd08ece692c910dd750bd960aadbb9dc23691c3ad9a8f419ef8d
''' Created on Aug 5, 2014 @author: gearsad ''' import vtk from math import * from SceneObject import SceneObject class Terrain(SceneObject): ''' A simple template for a terrain. Derived from http://vtk.org/gitweb?p=VTK.git;a=blob;f=Examples/Modelling/Python/expCos.py ''' terrainPoints = None def __init__(self, renderers, surfaceSize): ''' Initialize the terrain. This is derived from the expCos.py example on the vtk.org website, link is above. ''' # Call the parent constructor super(Terrain,self).__init__(renderers) # We create a 'surfaceSize' by 'surfaceSize' point plane to sample plane = vtk.vtkPlaneSource() plane.SetXResolution(surfaceSize) plane.SetYResolution(surfaceSize) # We transform the plane by a factor of 'surfaceSize' on X and Y transform = vtk.vtkTransform() transform.Scale(surfaceSize, surfaceSize, 1) transF = vtk.vtkTransformPolyDataFilter() transF.SetInputConnection(plane.GetOutputPort()) transF.SetTransform(transform) # Compute the function that we use for the height generation. # [Original comment] Note the unusual GetPolyDataInput() & GetOutputPort() methods. surfaceF = vtk.vtkProgrammableFilter() surfaceF.SetInputConnection(transF.GetOutputPort()) # [Original comment] The SetExecuteMethod takes a Python function as an argument # In here is where all the processing is done. def bessel(): input = surfaceF.GetPolyDataInput() numPts = input.GetNumberOfPoints() newPts = vtk.vtkPoints() derivs = vtk.vtkFloatArray() for i in range(0, numPts): x = input.GetPoint(i) x, z = x[:2] # Get the XY plane point, which we'll make an XZ plane point so that's it a ground surface - this is a convenient point to remap it... # Now do your surface construction here, which we'll just make an arbitrary wavy surface for now. y = sin(x / float(surfaceSize) * 6.282) * cos(z / float(surfaceSize) * 6.282) newPts.InsertPoint(i, x, y, z) derivs.InsertValue(i, y) surfaceF.GetPolyDataOutput().CopyStructure(input) surfaceF.GetPolyDataOutput().SetPoints(newPts) surfaceF.GetPolyDataOutput().GetPointData().SetScalars(derivs) surfaceF.SetExecuteMethod(bessel) # We warp the plane based on the scalar values calculated above warp = vtk.vtkWarpScalar() warp.SetInputConnection(surfaceF.GetOutputPort()) warp.XYPlaneOn() # Set the range of the colour mapper to the function min/max we used to generate the terrain. mapper = vtk.vtkPolyDataMapper() mapper.SetInputConnection(warp.GetOutputPort()) mapper.SetScalarRange(-1, 1) # Make our terrain wireframe so that it doesn't occlude the whole scene self.vtkActor.GetProperty().SetRepresentationToWireframe() # Finally assign this to the parent class actor so that it draws. self.vtkActor.SetMapper(mapper)
GearsAD/semisorted_arnerve
sandbox/bot_vis_platform_post3b/scene/Terrain.py
Python
mit
3,294
[ "VTK" ]
575305ef04ea835447f999b29eb3c33a5169a8ed1ea1e44210b99af4842d4a22
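How the Terrain class is attached to a scene depends on the project's SceneObject base class, which is not shown here; the sketch below is therefore only a hypothetical usage, assuming SceneObject registers self.vtkActor with each renderer passed in and that the scene package is on the Python path.

# Hypothetical usage (assumes SceneObject adds self.vtkActor to every renderer it receives).
import vtk
from scene.Terrain import Terrain

renderer = vtk.vtkRenderer()
terrain = Terrain([renderer], surfaceSize=32)      # build the warped, wireframe ground plane

window = vtk.vtkRenderWindow()
window.AddRenderer(renderer)
interactor = vtk.vtkRenderWindowInteractor()
interactor.SetRenderWindow(window)
window.Render()
interactor.Start()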
"""SALAMI Dataset Loader .. admonition:: Dataset Info :class: dropdown The SALAMI dataset contains Structural Annotations of a Large Amount of Music Information: the public portion contains over 2200 annotations of over 1300 unique tracks. NB: mirdata relies on the **corrected** version of the 2.0 annotations: Details can be found at https://github.com/bmcfee/salami-data-public/tree/hierarchy-corrections and https://github.com/DDMAL/salami-data-public/pull/15. For more details, please visit: https://github.com/DDMAL/salami-data-public """ import csv import os from typing import Optional, TextIO, Tuple from deprecated.sphinx import deprecated import librosa import numpy as np from smart_open import open from mirdata import annotations, core, download_utils, io, jams_utils BIBTEX = """@inproceedings{smith2011salami, title={Design and creation of a large-scale database of structural annotations.}, author={Smith, Jordan Bennett Louis and Burgoyne, John Ashley and Fujinaga, Ichiro and De Roure, David and Downie, J Stephen}, booktitle={12th International Society for Music Information Retrieval Conference}, year={2011}, series = {ISMIR}, }""" INDEXES = { "default": "2.0-corrected", "test": "2.0-corrected", "2.0-corrected": core.Index(filename="salami_index_2.0-corrected.json"), } REMOTES = { "annotations": download_utils.RemoteFileMetadata( filename="salami-data-public-hierarchy-corrections.zip", url="https://github.com/bmcfee/salami-data-public/archive/hierarchy-corrections.zip", checksum="194add2601c09a7279a7433288de81fd", ) } DOWNLOAD_INFO = """ Unfortunately the audio files of the Salami dataset are not available for download. If you have the Salami dataset, place the contents into a folder called Salami with the following structure: > Salami/ > salami-data-public-hierarchy-corrections/ > audio/ and copy the Salami folder to {} """ LICENSE_INFO = """ This data is released under a Creative Commons 0 license, effectively dedicating it to the public domain. More information about this dedication and your rights, please see the details here: http://creativecommons.org/publicdomain/zero/1.0/ and http://creativecommons.org/publicdomain/zero/1.0/legalcode. 
""" class Track(core.Track): """salami Track class Args: track_id (str): track id of the track Attributes: annotator_1_id (str): number that identifies annotator 1 annotator_1_time (str): time that the annotator 1 took to complete the annotation annotator_2_id (str): number that identifies annotator 1 annotator_2_time (str): time that the annotator 1 took to complete the annotation artist (str): song artist audio_path (str): path to the audio file broad_genre (str): broad genre of the song duration (float): duration of song in seconds genre (str): genre of the song sections_annotator1_lowercase_path (str): path to annotations in hierarchy level 1 from annotator 1 sections_annotator1_uppercase_path (str): path to annotations in hierarchy level 0 from annotator 1 sections_annotator2_lowercase_path (str): path to annotations in hierarchy level 1 from annotator 2 sections_annotator2_uppercase_path (str): path to annotations in hierarchy level 0 from annotator 2 source (str): dataset or source of song title (str): title of the song Cached Properties: sections_annotator_1_uppercase (SectionData): annotations in hierarchy level 0 from annotator 1 sections_annotator_1_lowercase (SectionData): annotations in hierarchy level 1 from annotator 1 sections_annotator_2_uppercase (SectionData): annotations in hierarchy level 0 from annotator 2 sections_annotator_2_lowercase (SectionData): annotations in hierarchy level 1 from annotator 2 """ def __init__( self, track_id, data_home, dataset_name, index, metadata, ): super().__init__( track_id, data_home, dataset_name, index, metadata, ) self.sections_annotator1_uppercase_path = self.get_path("annotator_1_uppercase") self.sections_annotator1_lowercase_path = self.get_path("annotator_1_lowercase") self.sections_annotator2_uppercase_path = self.get_path("annotator_2_uppercase") self.sections_annotator2_lowercase_path = self.get_path("annotator_2_lowercase") self.audio_path = self.get_path("audio") @property def source(self): return self._track_metadata.get("source") @property def annotator_1_id(self): return self._track_metadata.get("annotator_1_id") @property def annotator_2_id(self): return self._track_metadata.get("annotator_2_id") @property def duration(self): return self._track_metadata.get("duration") @property def title(self): return self._track_metadata.get("title") @property def artist(self): return self._track_metadata.get("artist") @property def annotator_1_time(self): return self._track_metadata.get("annotator_1_time") @property def annotator_2_time(self): return self._track_metadata.get("annotator_2_time") @property def broad_genre(self): return self._track_metadata.get("class") @property def genre(self): return self._track_metadata.get("genre") @core.cached_property def sections_annotator_1_uppercase(self) -> Optional[annotations.SectionData]: return load_sections(self.sections_annotator1_uppercase_path) @core.cached_property def sections_annotator_1_lowercase(self) -> Optional[annotations.SectionData]: return load_sections(self.sections_annotator1_lowercase_path) @core.cached_property def sections_annotator_2_uppercase(self) -> Optional[annotations.SectionData]: return load_sections(self.sections_annotator2_uppercase_path) @core.cached_property def sections_annotator_2_lowercase(self) -> Optional[annotations.SectionData]: return load_sections(self.sections_annotator2_lowercase_path) @property def audio(self) -> Tuple[np.ndarray, float]: """The track's audio Returns: * np.ndarray - audio signal * float - sample rate """ return 
load_audio(self.audio_path) def to_jams(self): """Get the track's data in jams format Returns: jams.JAMS: the track's data in jams format """ return jams_utils.jams_converter( audio_path=self.audio_path, multi_section_data=[ ( [ (self.sections_annotator_1_uppercase, 0), (self.sections_annotator_1_lowercase, 1), ], "annotator_1", ), ( [ (self.sections_annotator_2_uppercase, 0), (self.sections_annotator_2_lowercase, 1), ], "annotator_2", ), ], metadata=self._track_metadata, ) # no decorator here because of https://github.com/librosa/librosa/issues/1267 def load_audio(fpath: str) -> Tuple[np.ndarray, float]: """Load a Salami audio file. Args: fpath (str): path to audio file Returns: * np.ndarray - the mono audio signal * float - The sample rate of the audio file """ return librosa.load(fpath, sr=None, mono=True) @io.coerce_to_string_io def load_sections(fhandle: TextIO) -> annotations.SectionData: """Load salami sections data from a file Args: fhandle (str or file-like): File-like object or path to section annotation file Returns: SectionData: section data """ times = [] secs = [] reader = csv.reader(fhandle, delimiter="\t") for line in reader: times.append(float(line[0])) secs.append(line[1]) times = np.array(times) # type: ignore secs = np.array(secs) # type: ignore # remove sections with length == 0 times_revised = np.delete(times, np.where(np.diff(times) == 0)) secs_revised = np.delete(secs, np.where(np.diff(times) == 0)) return annotations.SectionData( np.array([times_revised[:-1], times_revised[1:]]).T, "s", list(secs_revised[:-1]), "open", ) @core.docstring_inherit(core.Dataset) class Dataset(core.Dataset): """ The salami dataset """ def __init__(self, data_home=None, version="default"): super().__init__( data_home, version, name="salami", track_class=Track, bibtex=BIBTEX, indexes=INDEXES, remotes=REMOTES, download_info=DOWNLOAD_INFO, license_info=LICENSE_INFO, ) @core.cached_property def _metadata(self): metadata_path = os.path.join( self.data_home, os.path.join( "salami-data-public-hierarchy-corrections", "metadata", "metadata.csv" ), ) try: with open(metadata_path, "r") as fhandle: reader = csv.reader(fhandle, delimiter=",") raw_data = [] for line in reader: if line != []: if line[0] == "SONG_ID": continue raw_data.append(line) except FileNotFoundError: raise FileNotFoundError("Metadata not found. Did you run .download()?") metadata_index = {} for line in raw_data: track_id = line[0] duration = None if line[5] != "": duration = float(line[5]) metadata_index[track_id] = { "source": line[1], "annotator_1_id": line[2], "annotator_2_id": line[3], "duration": duration, "title": line[7], "artist": line[8], "annotator_1_time": line[10], "annotator_2_time": line[11], "class": line[14], "genre": line[15], } return metadata_index @deprecated( reason="Use mirdata.datasets.salami.load_audio", version="0.3.4", ) def load_audio(self, *args, **kwargs): return load_audio(*args, **kwargs) @deprecated( reason="Use mirdata.datasets.salami.load_sections", version="0.3.4", ) def load_sections(self, *args, **kwargs): return load_sections(*args, **kwargs)
mir-dataset-loaders/mirdata
mirdata/datasets/salami.py
Python
bsd-3-clause
10,958
[ "VisIt" ]
77bc60345cfee1a7edf78639d548a61f6232c1bdfb61a9022d732219fc0795ba
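A typical way to drive this loader through mirdata's top-level API is sketched below; it is not part of the module, the data_home path is a placeholder, and the audio files must already be arranged as described in DOWNLOAD_INFO.

# Hypothetical usage via mirdata's top-level API (requires the mirdata package; path is a placeholder).
import mirdata

salami = mirdata.initialize("salami", data_home="/path/to/Salami")
salami.download()                                   # fetches the public annotation archive only
track = salami.track(salami.track_ids[0])
sections = track.sections_annotator_1_uppercase    # SectionData, or None if this track has no annotation
if sections is not None:
    print(sections.intervals[:3], sections.labels[:3])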
""" codecs -- Python Codec Registry, API and helpers. Written by Marc-Andre Lemburg (mal@lemburg.com). (c) Copyright CNRI, All Rights Reserved. NO WARRANTY. """#" import __builtin__, sys ### Registry and builtin stateless codec functions try: from _codecs import * except ImportError, why: raise SystemError('Failed to load the builtin codecs: %s' % why) __all__ = ["register", "lookup", "open", "EncodedFile", "BOM", "BOM_BE", "BOM_LE", "BOM32_BE", "BOM32_LE", "BOM64_BE", "BOM64_LE", "BOM_UTF8", "BOM_UTF16", "BOM_UTF16_LE", "BOM_UTF16_BE", "BOM_UTF32", "BOM_UTF32_LE", "BOM_UTF32_BE", "CodecInfo", "Codec", "IncrementalEncoder", "IncrementalDecoder", "StreamReader", "StreamWriter", "StreamReaderWriter", "StreamRecoder", "getencoder", "getdecoder", "getincrementalencoder", "getincrementaldecoder", "getreader", "getwriter", "encode", "decode", "iterencode", "iterdecode", "strict_errors", "ignore_errors", "replace_errors", "xmlcharrefreplace_errors", "backslashreplace_errors", "register_error", "lookup_error"] ### Constants # # Byte Order Mark (BOM = ZERO WIDTH NO-BREAK SPACE = U+FEFF) # and its possible byte string values # for UTF8/UTF16/UTF32 output and little/big endian machines # # UTF-8 BOM_UTF8 = '\xef\xbb\xbf' # UTF-16, little endian BOM_LE = BOM_UTF16_LE = '\xff\xfe' # UTF-16, big endian BOM_BE = BOM_UTF16_BE = '\xfe\xff' # UTF-32, little endian BOM_UTF32_LE = '\xff\xfe\x00\x00' # UTF-32, big endian BOM_UTF32_BE = '\x00\x00\xfe\xff' if sys.byteorder == 'little': # UTF-16, native endianness BOM = BOM_UTF16 = BOM_UTF16_LE # UTF-32, native endianness BOM_UTF32 = BOM_UTF32_LE else: # UTF-16, native endianness BOM = BOM_UTF16 = BOM_UTF16_BE # UTF-32, native endianness BOM_UTF32 = BOM_UTF32_BE # Old broken names (don't use in new code) BOM32_LE = BOM_UTF16_LE BOM32_BE = BOM_UTF16_BE BOM64_LE = BOM_UTF32_LE BOM64_BE = BOM_UTF32_BE ### Codec base classes (defining the API) class CodecInfo(tuple): """Codec details when looking up the codec registry""" # Private API to allow Python to blacklist the known non-Unicode # codecs in the standard library. A more general mechanism to # reliably distinguish test encodings from other codecs will hopefully # be defined for Python 3.5 # # See http://bugs.python.org/issue19619 _is_text_encoding = True # Assume codecs are text encodings by default def __new__(cls, encode, decode, streamreader=None, streamwriter=None, incrementalencoder=None, incrementaldecoder=None, name=None, _is_text_encoding=None): self = tuple.__new__(cls, (encode, decode, streamreader, streamwriter)) self.name = name self.encode = encode self.decode = decode self.incrementalencoder = incrementalencoder self.incrementaldecoder = incrementaldecoder self.streamwriter = streamwriter self.streamreader = streamreader if _is_text_encoding is not None: self._is_text_encoding = _is_text_encoding return self def __repr__(self): return "<%s.%s object for encoding %s at 0x%x>" % (self.__class__.__module__, self.__class__.__name__, self.name, id(self)) class Codec: """ Defines the interface for stateless encoders/decoders. The .encode()/.decode() methods may use different error handling schemes by providing the errors argument. These string values are predefined: 'strict' - raise a ValueError error (or a subclass) 'ignore' - ignore the character and continue with the next 'replace' - replace with a suitable replacement character; Python will use the official U+FFFD REPLACEMENT CHARACTER for the builtin Unicode codecs on decoding and '?' on encoding. 
'xmlcharrefreplace' - Replace with the appropriate XML character reference (only for encoding). 'backslashreplace' - Replace with backslashed escape sequences (only for encoding). The set of allowed values can be extended via register_error. """ def encode(self, input, errors='strict'): """ Encodes the object input and returns a tuple (output object, length consumed). errors defines the error handling to apply. It defaults to 'strict' handling. The method may not store state in the Codec instance. Use StreamWriter for codecs which have to keep state in order to make encoding efficient. The encoder must be able to handle zero length input and return an empty object of the output object type in this situation. """ raise NotImplementedError def decode(self, input, errors='strict'): """ Decodes the object input and returns a tuple (output object, length consumed). input must be an object which provides the bf_getreadbuf buffer slot. Python strings, buffer objects and memory mapped files are examples of objects providing this slot. errors defines the error handling to apply. It defaults to 'strict' handling. The method may not store state in the Codec instance. Use StreamReader for codecs which have to keep state in order to make decoding efficient. The decoder must be able to handle zero length input and return an empty object of the output object type in this situation. """ raise NotImplementedError class IncrementalEncoder(object): """ An IncrementalEncoder encodes an input in multiple steps. The input can be passed piece by piece to the encode() method. The IncrementalEncoder remembers the state of the Encoding process between calls to encode(). """ def __init__(self, errors='strict'): """ Creates an IncrementalEncoder instance. The IncrementalEncoder may use different error handling schemes by providing the errors keyword argument. See the module docstring for a list of possible values. """ self.errors = errors self.buffer = "" def encode(self, input, final=False): """ Encodes input and returns the resulting object. """ raise NotImplementedError def reset(self): """ Resets the encoder to the initial state. """ def getstate(self): """ Return the current state of the encoder. """ return 0 def setstate(self, state): """ Set the current state of the encoder. state must have been returned by getstate(). """ class BufferedIncrementalEncoder(IncrementalEncoder): """ This subclass of IncrementalEncoder can be used as the baseclass for an incremental encoder if the encoder must keep some of the output in a buffer between calls to encode(). """ def __init__(self, errors='strict'): IncrementalEncoder.__init__(self, errors) self.buffer = "" # unencoded input that is kept between calls to encode() def _buffer_encode(self, input, errors, final): # Overwrite this method in subclasses: It must encode input # and return an (output, length consumed) tuple raise NotImplementedError def encode(self, input, final=False): # encode input (taking the buffer into account) data = self.buffer + input (result, consumed) = self._buffer_encode(data, self.errors, final) # keep unencoded input until the next call self.buffer = data[consumed:] return result def reset(self): IncrementalEncoder.reset(self) self.buffer = "" def getstate(self): return self.buffer or 0 def setstate(self, state): self.buffer = state or "" class IncrementalDecoder(object): """ An IncrementalDecoder decodes an input in multiple steps. The input can be passed piece by piece to the decode() method. 
The IncrementalDecoder remembers the state of the decoding process between calls to decode(). """ def __init__(self, errors='strict'): """ Creates an IncrementalDecoder instance. The IncrementalDecoder may use different error handling schemes by providing the errors keyword argument. See the module docstring for a list of possible values. """ self.errors = errors def decode(self, input, final=False): """ Decodes input and returns the resulting object. """ raise NotImplementedError def reset(self): """ Resets the decoder to the initial state. """ def getstate(self): """ Return the current state of the decoder. This must be a (buffered_input, additional_state_info) tuple. buffered_input must be a bytes object containing bytes that were passed to decode() that have not yet been converted. additional_state_info must be a non-negative integer representing the state of the decoder WITHOUT yet having processed the contents of buffered_input. In the initial state and after reset(), getstate() must return (b"", 0). """ return (b"", 0) def setstate(self, state): """ Set the current state of the decoder. state must have been returned by getstate(). The effect of setstate((b"", 0)) must be equivalent to reset(). """ class BufferedIncrementalDecoder(IncrementalDecoder): """ This subclass of IncrementalDecoder can be used as the baseclass for an incremental decoder if the decoder must be able to handle incomplete byte sequences. """ def __init__(self, errors='strict'): IncrementalDecoder.__init__(self, errors) self.buffer = "" # undecoded input that is kept between calls to decode() def _buffer_decode(self, input, errors, final): # Overwrite this method in subclasses: It must decode input # and return an (output, length consumed) tuple raise NotImplementedError def decode(self, input, final=False): # decode input (taking the buffer into account) data = self.buffer + input (result, consumed) = self._buffer_decode(data, self.errors, final) # keep undecoded input until the next call self.buffer = data[consumed:] return result def reset(self): IncrementalDecoder.reset(self) self.buffer = "" def getstate(self): # additional state info is always 0 return (self.buffer, 0) def setstate(self, state): # ignore additional state info self.buffer = state[0] # # The StreamWriter and StreamReader class provide generic working # interfaces which can be used to implement new encoding submodules # very easily. See encodings/utf_8.py for an example on how this is # done. # class StreamWriter(Codec): def __init__(self, stream, errors='strict'): """ Creates a StreamWriter instance. stream must be a file-like object open for writing (binary) data. The StreamWriter may use different error handling schemes by providing the errors keyword argument. These parameters are predefined: 'strict' - raise a ValueError (or a subclass) 'ignore' - ignore the character and continue with the next 'replace'- replace with a suitable replacement character 'xmlcharrefreplace' - Replace with the appropriate XML character reference. 'backslashreplace' - Replace with backslashed escape sequences (only for encoding). The set of allowed parameter values can be extended via register_error. """ self.stream = stream self.errors = errors def write(self, object): """ Writes the object's contents encoded to self.stream. """ data, consumed = self.encode(object, self.errors) self.stream.write(data) def writelines(self, list): """ Writes the concatenated list of strings to the stream using .write(). 
""" self.write(''.join(list)) def reset(self): """ Flushes and resets the codec buffers used for keeping state. Calling this method should ensure that the data on the output is put into a clean state, that allows appending of new fresh data without having to rescan the whole stream to recover state. """ pass def seek(self, offset, whence=0): self.stream.seek(offset, whence) if whence == 0 and offset == 0: self.reset() def __getattr__(self, name, getattr=getattr): """ Inherit all other methods from the underlying stream. """ return getattr(self.stream, name) def __enter__(self): return self def __exit__(self, type, value, tb): self.stream.close() ### class StreamReader(Codec): def __init__(self, stream, errors='strict'): """ Creates a StreamReader instance. stream must be a file-like object open for reading (binary) data. The StreamReader may use different error handling schemes by providing the errors keyword argument. These parameters are predefined: 'strict' - raise a ValueError (or a subclass) 'ignore' - ignore the character and continue with the next 'replace'- replace with a suitable replacement character; The set of allowed parameter values can be extended via register_error. """ self.stream = stream self.errors = errors self.bytebuffer = "" # For str->str decoding this will stay a str # For str->unicode decoding the first read will promote it to unicode self.charbuffer = "" self.linebuffer = None def decode(self, input, errors='strict'): raise NotImplementedError def read(self, size=-1, chars=-1, firstline=False): """ Decodes data from the stream self.stream and returns the resulting object. chars indicates the number of characters to read from the stream. read() will never return more than chars characters, but it might return less, if there are not enough characters available. size indicates the approximate maximum number of bytes to read from the stream for decoding purposes. The decoder can modify this setting as appropriate. The default value -1 indicates to read and decode as much as possible. size is intended to prevent having to decode huge files in one step. If firstline is true, and a UnicodeDecodeError happens after the first line terminator in the input only the first line will be returned, the rest of the input will be kept until the next call to read(). The method should use a greedy read strategy meaning that it should read as much data as is allowed within the definition of the encoding and the given size, e.g. if optional encoding endings or state markers are available on the stream, these should be read too. """ # If we have lines cached, first merge them back into characters if self.linebuffer: self.charbuffer = "".join(self.linebuffer) self.linebuffer = None if chars < 0: # For compatibility with other read() methods that take a # single argument chars = size # read until we get the required number of characters (if available) while True: # can the request be satisfied from the character buffer? 
if chars >= 0: if len(self.charbuffer) >= chars: break # we need more data if size < 0: newdata = self.stream.read() else: newdata = self.stream.read(size) # decode bytes (those remaining from the last call included) data = self.bytebuffer + newdata try: newchars, decodedbytes = self.decode(data, self.errors) except UnicodeDecodeError, exc: if firstline: newchars, decodedbytes = self.decode(data[:exc.start], self.errors) lines = newchars.splitlines(True) if len(lines)<=1: raise else: raise # keep undecoded bytes until the next call self.bytebuffer = data[decodedbytes:] # put new characters in the character buffer self.charbuffer += newchars # there was no data available if not newdata: break if chars < 0: # Return everything we've got result = self.charbuffer self.charbuffer = "" else: # Return the first chars characters result = self.charbuffer[:chars] self.charbuffer = self.charbuffer[chars:] return result def readline(self, size=None, keepends=True): """ Read one line from the input stream and return the decoded data. size, if given, is passed as size argument to the read() method. """ # If we have lines cached from an earlier read, return # them unconditionally if self.linebuffer: line = self.linebuffer[0] del self.linebuffer[0] if len(self.linebuffer) == 1: # revert to charbuffer mode; we might need more data # next time self.charbuffer = self.linebuffer[0] self.linebuffer = None if not keepends: line = line.splitlines(False)[0] return line readsize = size or 72 line = "" # If size is given, we call read() only once while True: data = self.read(readsize, firstline=True) if data: # If we're at a "\r" read one extra character (which might # be a "\n") to get a proper line ending. If the stream is # temporarily exhausted we return the wrong line ending. if data.endswith("\r"): data += self.read(size=1, chars=1) line += data lines = line.splitlines(True) if lines: if len(lines) > 1: # More than one line result; the first line is a full line # to return line = lines[0] del lines[0] if len(lines) > 1: # cache the remaining lines lines[-1] += self.charbuffer self.linebuffer = lines self.charbuffer = None else: # only one remaining line, put it back into charbuffer self.charbuffer = lines[0] + self.charbuffer if not keepends: line = line.splitlines(False)[0] break line0withend = lines[0] line0withoutend = lines[0].splitlines(False)[0] if line0withend != line0withoutend: # We really have a line end # Put the rest back together and keep it until the next call self.charbuffer = "".join(lines[1:]) + self.charbuffer if keepends: line = line0withend else: line = line0withoutend break # we didn't get anything or this was our only try if not data or size is not None: if line and not keepends: line = line.splitlines(False)[0] break if readsize<8000: readsize *= 2 return line def readlines(self, sizehint=None, keepends=True): """ Read all lines available on the input stream and return them as list of lines. Line breaks are implemented using the codec's decoder method and are included in the list entries. sizehint, if given, is ignored since there is no efficient way to finding the true end-of-line. """ data = self.read() return data.splitlines(keepends) def reset(self): """ Resets the codec buffers used for keeping state. Note that no stream repositioning should take place. This method is primarily intended to be able to recover from decoding errors. """ self.bytebuffer = "" self.charbuffer = u"" self.linebuffer = None def seek(self, offset, whence=0): """ Set the input stream's current position. 
Resets the codec buffers used for keeping state. """ self.stream.seek(offset, whence) self.reset() def next(self): """ Return the next decoded line from the input stream.""" line = self.readline() if line: return line raise StopIteration def __iter__(self): return self def __getattr__(self, name, getattr=getattr): """ Inherit all other methods from the underlying stream. """ return getattr(self.stream, name) def __enter__(self): return self def __exit__(self, type, value, tb): self.stream.close() ### class StreamReaderWriter: """ StreamReaderWriter instances allow wrapping streams which work in both read and write modes. The design is such that one can use the factory functions returned by the codec.lookup() function to construct the instance. """ # Optional attributes set by the file wrappers below encoding = 'unknown' def __init__(self, stream, Reader, Writer, errors='strict'): """ Creates a StreamReaderWriter instance. stream must be a Stream-like object. Reader, Writer must be factory functions or classes providing the StreamReader, StreamWriter interface resp. Error handling is done in the same way as defined for the StreamWriter/Readers. """ self.stream = stream self.reader = Reader(stream, errors) self.writer = Writer(stream, errors) self.errors = errors def read(self, size=-1): return self.reader.read(size) def readline(self, size=None): return self.reader.readline(size) def readlines(self, sizehint=None): return self.reader.readlines(sizehint) def next(self): """ Return the next decoded line from the input stream.""" return self.reader.next() def __iter__(self): return self def write(self, data): return self.writer.write(data) def writelines(self, list): return self.writer.writelines(list) def reset(self): self.reader.reset() self.writer.reset() def seek(self, offset, whence=0): self.stream.seek(offset, whence) self.reader.reset() if whence == 0 and offset == 0: self.writer.reset() def __getattr__(self, name, getattr=getattr): """ Inherit all other methods from the underlying stream. """ return getattr(self.stream, name) # these are needed to make "with codecs.open(...)" work properly def __enter__(self): return self def __exit__(self, type, value, tb): self.stream.close() ### class StreamRecoder: """ StreamRecoder instances provide a frontend - backend view of encoding data. They use the complete set of APIs returned by the codecs.lookup() function to implement their task. Data written to the stream is first decoded into an intermediate format (which is dependent on the given codec combination) and then written to the stream using an instance of the provided Writer class. In the other direction, data is read from the stream using a Reader instance and then return encoded data to the caller. """ # Optional attributes set by the file wrappers below data_encoding = 'unknown' file_encoding = 'unknown' def __init__(self, stream, encode, decode, Reader, Writer, errors='strict'): """ Creates a StreamRecoder instance which implements a two-way conversion: encode and decode work on the frontend (the input to .read() and output of .write()) while Reader and Writer work on the backend (reading and writing to the stream). You can use these objects to do transparent direct recodings from e.g. latin-1 to utf-8 and back. stream must be a file-like object. encode, decode must adhere to the Codec interface, Reader, Writer must be factory functions or classes providing the StreamReader, StreamWriter interface resp. 
encode and decode are needed for the frontend translation, Reader and Writer for the backend translation. Unicode is used as intermediate encoding. Error handling is done in the same way as defined for the StreamWriter/Readers. """ self.stream = stream self.encode = encode self.decode = decode self.reader = Reader(stream, errors) self.writer = Writer(stream, errors) self.errors = errors def read(self, size=-1): data = self.reader.read(size) data, bytesencoded = self.encode(data, self.errors) return data def readline(self, size=None): if size is None: data = self.reader.readline() else: data = self.reader.readline(size) data, bytesencoded = self.encode(data, self.errors) return data def readlines(self, sizehint=None): data = self.reader.read() data, bytesencoded = self.encode(data, self.errors) return data.splitlines(1) def next(self): """ Return the next decoded line from the input stream.""" data = self.reader.next() data, bytesencoded = self.encode(data, self.errors) return data def __iter__(self): return self def write(self, data): data, bytesdecoded = self.decode(data, self.errors) return self.writer.write(data) def writelines(self, list): data = ''.join(list) data, bytesdecoded = self.decode(data, self.errors) return self.writer.write(data) def reset(self): self.reader.reset() self.writer.reset() def __getattr__(self, name, getattr=getattr): """ Inherit all other methods from the underlying stream. """ return getattr(self.stream, name) def __enter__(self): return self def __exit__(self, type, value, tb): self.stream.close() ### Shortcuts def open(filename, mode='rb', encoding=None, errors='strict', buffering=1): """ Open an encoded file using the given mode and return a wrapped version providing transparent encoding/decoding. Note: The wrapped version will only accept the object format defined by the codecs, i.e. Unicode objects for most builtin codecs. Output is also codec dependent and will usually be Unicode as well. Files are always opened in binary mode, even if no binary mode was specified. This is done to avoid data loss due to encodings using 8-bit values. The default file mode is 'rb' meaning to open the file in binary read mode. encoding specifies the encoding which is to be used for the file. errors may be given to define the error handling. It defaults to 'strict' which causes ValueErrors to be raised in case an encoding error occurs. buffering has the same meaning as for the builtin open() API. It defaults to line buffered. The returned wrapped file object provides an extra attribute .encoding which allows querying the used encoding. This attribute is only available if an encoding was specified as parameter. """ if encoding is not None: if 'U' in mode: # No automatic conversion of '\n' is done on reading and writing mode = mode.strip().replace('U', '') if mode[:1] not in set('rwa'): mode = 'r' + mode if 'b' not in mode: # Force opening of the file in binary mode mode = mode + 'b' file = __builtin__.open(filename, mode, buffering) if encoding is None: return file info = lookup(encoding) srw = StreamReaderWriter(file, info.streamreader, info.streamwriter, errors) # Add attributes to simplify introspection srw.encoding = encoding return srw def EncodedFile(file, data_encoding, file_encoding=None, errors='strict'): """ Return a wrapped version of file which provides transparent encoding translation. Strings written to the wrapped file are interpreted according to the given data_encoding and then written to the original file as string using file_encoding. 
The intermediate encoding will usually be Unicode but depends on the specified codecs. Strings are read from the file using file_encoding and then passed back to the caller as string using data_encoding. If file_encoding is not given, it defaults to data_encoding. errors may be given to define the error handling. It defaults to 'strict' which causes ValueErrors to be raised in case an encoding error occurs. The returned wrapped file object provides two extra attributes .data_encoding and .file_encoding which reflect the given parameters of the same name. The attributes can be used for introspection by Python programs. """ if file_encoding is None: file_encoding = data_encoding data_info = lookup(data_encoding) file_info = lookup(file_encoding) sr = StreamRecoder(file, data_info.encode, data_info.decode, file_info.streamreader, file_info.streamwriter, errors) # Add attributes to simplify introspection sr.data_encoding = data_encoding sr.file_encoding = file_encoding return sr ### Helpers for codec lookup def getencoder(encoding): """ Lookup up the codec for the given encoding and return its encoder function. Raises a LookupError in case the encoding cannot be found. """ return lookup(encoding).encode def getdecoder(encoding): """ Lookup up the codec for the given encoding and return its decoder function. Raises a LookupError in case the encoding cannot be found. """ return lookup(encoding).decode def getincrementalencoder(encoding): """ Lookup up the codec for the given encoding and return its IncrementalEncoder class or factory function. Raises a LookupError in case the encoding cannot be found or the codecs doesn't provide an incremental encoder. """ encoder = lookup(encoding).incrementalencoder if encoder is None: raise LookupError(encoding) return encoder def getincrementaldecoder(encoding): """ Lookup up the codec for the given encoding and return its IncrementalDecoder class or factory function. Raises a LookupError in case the encoding cannot be found or the codecs doesn't provide an incremental decoder. """ decoder = lookup(encoding).incrementaldecoder if decoder is None: raise LookupError(encoding) return decoder def getreader(encoding): """ Lookup up the codec for the given encoding and return its StreamReader class or factory function. Raises a LookupError in case the encoding cannot be found. """ return lookup(encoding).streamreader def getwriter(encoding): """ Lookup up the codec for the given encoding and return its StreamWriter class or factory function. Raises a LookupError in case the encoding cannot be found. """ return lookup(encoding).streamwriter def iterencode(iterator, encoding, errors='strict', **kwargs): """ Encoding iterator. Encodes the input strings from the iterator using an IncrementalEncoder. errors and kwargs are passed through to the IncrementalEncoder constructor. """ encoder = getincrementalencoder(encoding)(errors, **kwargs) for input in iterator: output = encoder.encode(input) if output: yield output output = encoder.encode("", True) if output: yield output def iterdecode(iterator, encoding, errors='strict', **kwargs): """ Decoding iterator. Decodes the input strings from the iterator using an IncrementalDecoder. errors and kwargs are passed through to the IncrementalDecoder constructor. 
""" decoder = getincrementaldecoder(encoding)(errors, **kwargs) for input in iterator: output = decoder.decode(input) if output: yield output output = decoder.decode("", True) if output: yield output ### Helpers for charmap-based codecs def make_identity_dict(rng): """ make_identity_dict(rng) -> dict Return a dictionary where elements of the rng sequence are mapped to themselves. """ res = {} for i in rng: res[i]=i return res def make_encoding_map(decoding_map): """ Creates an encoding map from a decoding map. If a target mapping in the decoding map occurs multiple times, then that target is mapped to None (undefined mapping), causing an exception when encountered by the charmap codec during translation. One example where this happens is cp875.py which decodes multiple character to \\u001a. """ m = {} for k,v in decoding_map.items(): if not v in m: m[v] = k else: m[v] = None return m ### error handlers try: strict_errors = lookup_error("strict") ignore_errors = lookup_error("ignore") replace_errors = lookup_error("replace") xmlcharrefreplace_errors = lookup_error("xmlcharrefreplace") backslashreplace_errors = lookup_error("backslashreplace") except LookupError: # In --disable-unicode builds, these error handler are missing strict_errors = None ignore_errors = None replace_errors = None xmlcharrefreplace_errors = None backslashreplace_errors = None # Tell modulefinder that using codecs probably needs the encodings # package _false = 0 if _false: import encodings ### Tests if __name__ == '__main__': # Make stdout translate Latin-1 output into UTF-8 output sys.stdout = EncodedFile(sys.stdout, 'latin-1', 'utf-8') # Have stdin translate Latin-1 input into UTF-8 input sys.stdin = EncodedFile(sys.stdin, 'utf-8', 'latin-1')
HiSPARC/station-software
user/python/Lib/codecs.py
Python
gpl-3.0
36,143
[ "FEFF" ]
d1117756af053b3a1080525a53b6031f61b4161eaa24d85d032f3b5dda49bfbf
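A minimal usage sketch (not part of the codecs.py source above) of the incremental-decoder machinery it defines, assuming a UTF-8 byte stream that arrives in arbitrary chunks so a multi-byte character can be split across calls:

import codecs

decoder = codecs.getincrementaldecoder('utf-8')(errors='strict')
chunks = [b'caf', b'\xc3', b'\xa9']        # u'caf\xe9' with its 2-byte character split across chunks
text = u''
for chunk in chunks:
    text += decoder.decode(chunk)          # an incomplete sequence is buffered, not an error
text += decoder.decode(b'', final=True)    # flush any remaining state
assert text == u'caf\xe9'

Decoding each chunk independently with a one-shot decode would fail on the split character; buffering that partial input between calls is exactly what the IncrementalDecoder/BufferedIncrementalDecoder classes above provide.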
#! /usr/bin/env python import sys import argparse import screed import math def ignore_at(iter): for item in iter: if item.startswith('@'): continue yield item def main(): parser = argparse.ArgumentParser() parser.add_argument('genome') parser.add_argument('samfile') parser.add_argument('-o', '--outfile', type=argparse.FileType('w'), default=sys.stdout) args = parser.parse_args() genome_dict = dict([ (record.name, record.sequence) for record in \ screed.open(args.genome) ]) n = 0 n_skipped = 0 n_rev = n_fwd = 0 for samline in ignore_at(open(args.samfile)): n += 1 if n % 100000 == 0: print >>sys.stderr, '...', n readname, flags, refname, refpos, _, _, _, _, _, seq = \ samline.split('\t')[:10] if refname == '*' or refpos == '*': # (don't count these as skipped) continue refpos = int(refpos) try: ref = genome_dict[refname][refpos-1:refpos+len(seq) - 1] except KeyError: print >>sys.stderr, "unknown refname: %s; ignoring (read %s)" % (refname, readname) n_skipped += 1 continue errors = [] for pos, (a, b) in enumerate(zip(ref, seq)): if a != b: # see http://bowtie-bio.sourceforge.net/bowtie2/manual.shtml#sam-output - '16' is the revcomp flag. if int(flags) & 16: pos = len(seq) - pos - 1 n_rev += 1 else: n_fwd += 1 errors.append(pos) print >>args.outfile, readname, ",".join(map(str, errors)) # avoid log errors via pseudocount n_fwd += 1 n_rev += 1 print >>sys.stderr, 'logratio of fwd to rev: %.2f' % (math.log(n_fwd / float(n_rev), 2)) if n_skipped / float(n) > .01: raise Exception, "Error: too many reads ignored! %d of %d" % \ (n_skipped, n) if __name__ == '__main__': main()
ctb/2014-streaming
pipeline/sam-scan.py
Python
bsd-3-clause
2,104
[ "Bowtie" ]
7b1a956f0e467b9b361d0d412f880a88f466f928fcc396be9a3ab262f96d7daf
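A small sketch (hypothetical helpers, not taken from the script above) of the SAM FLAG handling the scanner relies on: bit 16 (0x10) marks a read aligned to the reverse strand, so mismatch positions are mirrored with pos = len(seq) - pos - 1:

def is_reverse(flags):
    # SAM FLAG field: bit 0x10 set means the read maps to the reverse strand
    return bool(int(flags) & 16)

def orient_position(pos, seq_len, flags):
    # report a mismatch position in read orientation, mirroring reverse-strand hits
    return seq_len - pos - 1 if is_reverse(flags) else pos

assert is_reverse('16') and not is_reverse('0')
assert orient_position(2, 10, 16) == 7
assert orient_position(2, 10, 0) == 2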
# coding: utf-8 # # VAMPPrior Implementation in Tensorflow # # ## Overview # # Commonly, machine learning algorithms can be built from 4 different modules: data, a model, a learning algorithm, and an optimization procedure. Most of the times, these 4 modules can be freely combined with other ones. # # We structured this notebook with these 4 modules in mind. # # In Section 0 we do some maintenance - 0.1 defines the libraries that we use and 0.2 defines the hyper paramaters of our notebook, and in 0.3 we load the trainingsdata from mnist. # # In Section 1, we deal with the first module - data. In Section 1.1 we split the data to trainsgsbatches. # # In Section 2, we define the VAMPprior model. We first introduce two layers that we will use as building blocks for our model. In Section 2.1 we describe the gated linear layer and in Section 2.2 we will describe a linear layer. The linear layer $Wx+b$ could have been reused from the tf.contrib, however, we choose to add it for clarity. # # In Section 3 we define the third module - our learning objective. Our learning objective consists of two parts: the reconstruction error between z and x, and the KL divergence between the prior P(z) and the approximate posterior Q(z|x). The Prior p(z) is parametrized, i.e. it is not a standard gaussian anymore but a Mixture of Gaussians. # # In Section 4 we describe the optimization procedure. We use Adam, which is a momentum-based form of SDG. # # In Section 5 we perform the actual training. # # # ## Acknowledgements # * Original paper of this architecture: https://arxiv.org/abs/1705.07120 # * Original implementation: https://github.com/jmtomczak/vae_vampprior # # ## TODO # * Warmup not implemented # ## 0.1 Imports # In[ ]: import tensorflow as tf import numpy as np np.set_printoptions(precision=2) import logging import random import os from os.path import join as j logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) from IPython.display import Image # displaying images from tensorflow.python.client import timeline import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec import time # load trainings data from keras.datasets import mnist import itertools assert(tf.__version__=="1.2.0") # make sure to have the right tensorflow verson # ## 0.2 Load trainingsdata from mnist # In[ ]: (train_features, _), (test_features, _) = mnist.load_data() # shape = [60000, 28,28] and [10000, 28,28] features = np.concatenate((train_features,test_features), axis = 0) # shape = [70000, 28,28] features.astype('float32') features = features / 255.0 # normalize pixel values between 0 - 1 features = features.reshape(-1, 28*28) # => [70000, 784] num_examples = len(features)# 70000 x = features # our input data # # 0.3 Hyper Parameters # In[ ]: RANDOM_SEED = 0 # for reproducability TODO graph_outdir = "graph-vamp" model_name ="nn-vamp" learning_rate = 0.001 tf_beta = tf.Variable(0.01, trainable=False, dtype=tf.float32) x_dim = 28*28 # our mnist datapixel # encoder enc_l1_dim = 300 enc_l2_dim = 300 z_dim = 40 # decoder dec_l1_dim = 300 dec_l2_dim = 300 tf_global_step = tf.Variable(0, trainable=False) # VAMPPrior number_of_components = 500 # K in paper # training batch_size = 500 num_epochs = 2000 num_batches_per_epoch = int(num_examples / batch_size) num_training_steps = num_batches_per_epoch * num_epochs trainings_iterator = list(iter(zip( list(range(1, num_batches_per_epoch*num_epochs+1)), # current_step list(range(1, num_batches_per_epoch+1))*num_epochs, # 
current_batch list( # [0000 ... 1111 ... 2222 ...] itertools.chain(*[[e]*num_batches_per_epoch for e in range(1,num_epochs+1)]) ) # current_epoch ))) save_checkpoint_after_each_step = num_batches_per_epoch print_loss_after_each_step = num_batches_per_epoch / 10 # print 10 status per epoch # # 1 Data # # 1.1 Create trainings batches # In[ ]: # generate trainings data print("Total trainingsteps:%i"%len(trainings_iterator)) print("Preparing %i batches..."%num_batches_per_epoch) data = {} for batch_id, start_index in enumerate(range(0,num_examples, batch_size)): data[batch_id+1]=x[start_index:start_index+batch_size] # [200, 784] print("done") # # 2 Model # ## 2.1 Gated Dense Layer # * input: [batch_size, input_dimension] # * output: [batch_size, output_dimension] # * Paper: https://arxiv.org/abs/1612.08083 # In[ ]: def gated_dense_layer( layer_input, # x [batch_size, input_dimension] in_dim, # [input_dimension] out_dim, # [output_dimension] name, activation_function = tf.nn.sigmoid, # sigma in paper dtype=tf.float32, weight_initializer=tf.contrib.layers.xavier_initializer(seed=RANDOM_SEED)): with tf.variable_scope(name): # gate - A in paper gate_weights = tf.get_variable( shape=[in_dim, out_dim], dtype=tf.float32, name="%s_gate_weights"%name, initializer=weight_initializer) gate_bias = tf.get_variable(shape=[out_dim], name="%s_gate_bias"%name) gate_linear = tf.nn.bias_add(value=tf.matmul(layer_input,gate_weights),bias=gate_bias) # normal dense - B in paper dense_weights = tf.get_variable( shape=[in_dim, out_dim], dtype=tf.float32, name="%s_dense_weights"%name, initializer=weight_initializer) dense_bias = tf.get_variable(shape=[out_dim], name="%s_dense_bias"%name) dense_linear = tf.nn.bias_add(value=tf.matmul(layer_input,dense_weights),bias=dense_bias) dense_activated = activation_function(dense_linear) layer_outputs = tf.multiply(x=gate_linear, y=dense_activated, name="%s_outputs"%name) # H_0 = A * sigma(B) return layer_outputs # H_0 # ## 2.2 Linear Dense # * could be implemented with https://www.tensorflow.org/api_docs/python/tf/contrib/layers/fully_connected , but added it to make it more explicit what is happening # * [batch_size, in_dim] => [batch_size, out_dim] # In[ ]: def linear_layer( layer_input, in_dim, # [input_dimension] out_dim, # [output_dimension] name, weight_initializer=tf.contrib.layers.xavier_initializer(seed=RANDOM_SEED)): with tf.variable_scope(name): # gate - A in paper weights = tf.get_variable( shape=[in_dim, out_dim], dtype=tf.float32, name="%s_weights"%name, initializer=weight_initializer) bias = tf.get_variable(shape=[out_dim], name="%s_bias"%name) linear = tf.nn.bias_add(value=tf.matmul(layer_input,weights),bias=bias) return linear # # 2.3 VAMPPrior VAE Model # * https://arxiv.org/pdf/1312.6114.pdf # # ## Calculation Graph # # * dark green: everything that is connected with the VAE / the data (X, posterior) # * light green: everything that is connected to the components of the gaussian mixtures (U,VampPrior) # * orange: cannot backpropagate through here # * light blue: everything that is connected to the loss calculation # * $X$, $\hat{X}$ \ldots the data and the reconstructed data # * $U$ \ldots our components for the gaussian mixtures, in our case each component is a vector of size 784 # * $\mu_X$,$\Sigma_X$ the mean (a vector of size z_dim) and the diogonal covariance matrix after encoding data $X$. Since $\Sigma_X$ is diogonal, 1 row of the matrix represents the values. 
# * $\mu_U$,$\Sigma_U$ the mean (a vector of size z_dim) and the diogonal covariance matrix after encoding data $U$. # * Prior: $P_\lambda(z)$ # * Approximate Posterior: $Q(z|x)$ # # <img src="figures/vampprior-model.png" alt="Vamprrior graph" style="width: 750px;"/> # # ## Calculations step-by-step # # * we have a latent space $Z$ and an observable space $X$. # * $z \in Z$ is an a vector of space $Z$. In our case, $z$ is 'z_dim' dimensional vector. # * $x \in X$ is a vector of space $X$. In our case, $x$ are the 784 pixels that represent our image. # * p(z), the prior is the data distribution of z. # * p(x), is the data generating distribution of x. In other words, this are the 784 vectors that are likely to show some images. # * We want to have a mapping between $z$ and $x$, where $p(x|z)p(z)$ is as close as possible to $p(x)$. In other words, if we sample a $z$ from $p(z)$, we want to obtain an $x$ that is very likely under the data distributing generation $p(x)$. # * For most of the $z$'s that are sampled from $p(z)$ the resulting $x$'s would have very low probability. Consequently, we would need to sample an enormous amount of $z$'s to get a representative model. Learning such a mapping function is not very efficient. # * Instead of sampling many $z$'s we apply a more efficient strategy. We sample only $z$'s that we think are likely to generate true $x$. To obtain these likely $z$'s we introduce the posterior $p(z|x)$ which would give us. # * $p(z|x)$ sounds like a good idea, however, obtaining it is most of the times intractable. Therefore, we will use the approximate posterior $q(z|x)$. # * We don't know $p(z|x)$, but we know that we want $q(z|x)$ as close as possible to $p(z|x)$. Therefore, we know that the KL divergence between the two should be minimal. # * $KL(q(z|x) || p(z|x))$ # * TODO complete the train of thoughts. # # # ## Key insights: # # * if you have a function $x=f(z;\phi)$, and function $f$ is deterministic and parameters $\phi$ is fixed, then $f(z;\phi)$ is a random variable of space $X$. # * our goal is it to learn a function $x_{generated}=f(z;\phi)$ that maximizes the probability of $x_{generated}$ following the true data distribution $p(x)$. # # # # ## Reparametrization Trick # * If you would would randomly sample z from the distribution $\mathcal{N}(z;\mu, \Sigma)$, then we could not backpropagate. # * Instead, we draw a random $\epsilon$ from a standard gaussian multivariate distribution $\epsilon \sim \mathcal{N}(0,I)$, and re-shift it using $z=\mu + \epsilon * \sqrt{\Sigma}$. # * In our case, the layer learns $ln(\Sigma)$, therefore we first calculate the standard deviation $std = exp(ln(\frac{1}{2}\Sigma))$ # # In the following two figures, red means that gradients cannot flow back, because of the stochasticity of the nodes. If we would sample from the distribution $\mathcal{N}(z;\mu, \Sigma)$ directly, we could not backpropagate. # # <img src="figures/non-reparametrize.png" alt="Non Parametrized" style="width: 350px;"/> # # In constrast, after using the reparametrization trick $z=\mu + \epsilon * \sqrt{\Sigma}$ with $\epsilon \sim \mathcal{N}(0,1)$ we can backpropagate to $\Sigma$ and $\mu$. We still cannot backpropagate to $\epsilon$, but we also don't need to. 
# <img src="figures/reparametrize.png" alt="Non Parametrized" style="width: 350px;"/> # In[ ]: def variational_encoder(x): # encoder # q(z | x) with tf.variable_scope("VariationalEncoder"): enc_h1 = gated_dense_layer(layer_input=x, in_dim=x_dim, out_dim=enc_l1_dim, name="Layer1") # [batch_size, 300] enc_h2 = gated_dense_layer(layer_input=enc_h1, in_dim=enc_l1_dim, out_dim=enc_l2_dim, name="Layer2") # [batch_size, 300] z_q_mean = linear_layer(enc_h2, in_dim=enc_l2_dim, out_dim=z_dim, name="z_mean") # [batch_size, z_dim] z_q_logvar = linear_layer(enc_h2, in_dim=enc_l2_dim, out_dim=z_dim, name="z_logvar") # [batch_size, z_dim] return z_q_mean,z_q_logvar # In[ ]: def reparametrize(z_q_mean, z_q_logvar): # https://arxiv.org/pdf/1312.6114.pdf, Page 5 in the very bottom. # [bs, z_size] with tf.name_scope("Repararmetrization_Trick"): # this is the reparameterization trick # z_sigma = z_q_logvar # z_mu = z_q_mean epsilon = tf.random_normal( # draw some uniform random noise tf.shape(z_q_logvar), mean=0.0, stddev=1.0, dtype=tf.float32, name="random_variable_epsilon") std = tf.sqrt(tf.exp(z_q_logvar)) # e^(1/2 * ln(var)) = sqrt(var) z_q = z_q_mean + epsilon * std # [batch_size, z_size] return z_q # z_q means z~Q(z|x). In words: that z was sampled from the approximate posterior Q(z|x) # In[ ]: def variational_decoder(z_q): # decoder - p(x|z) with tf.variable_scope("VariationalDecoder"): # x_mean = p(x|z) dec_h1 = gated_dense_layer(layer_input=z_q, in_dim=z_dim, out_dim=dec_l1_dim, name="DecLayer1") # [batch_size, 300] dec_h2 = gated_dense_layer(layer_input=dec_h1, in_dim=dec_l1_dim, out_dim=dec_l2_dim, name="DecLayer2") # [batch_size, 300] x_mean = linear_layer(dec_h2, in_dim=dec_l2_dim, out_dim=x_dim, name="x_mean") # [batch_size, 784] # x_mean is our predicted x from the sampled z. so we have: P(x|z)P(z) # we want to maximize the probability that x_mean looks like our original x return x_mean # ## Model # In[ ]: # input with tf.variable_scope("Input_features_%s"%x_dim): x_input = tf.placeholder(shape=[None, x_dim], dtype=tf.float32, name="input_features")# shape: [batch_size, x_dim] with tf.variable_scope("Input_VAMPprior"): # prior: p(z) = 1/K sum_k N(mean_k, var_k) # mixture of Gaussians parameters component_means = tf.get_variable( # u in paper shape=[number_of_components, x_dim], #[500, 784] dtype=tf.float32, name="vamp_prior_weights", initializer=tf.contrib.layers.xavier_initializer(seed=RANDOM_SEED)) with tf.variable_scope("VAMPModel") as scope: # encoder z_q_mean,z_q_logvar = variational_encoder(x=x_input) # reparametrize z_q = reparametrize(z_q_mean=z_q_mean, z_q_logvar=z_q_logvar) # vamp_prior scope.reuse_variables() # share parameters for the encoder z_p_mean,z_p_logvar = variational_encoder(x=component_means) #number_components x z_dim print("z_p_mean", z_p_mean.shape) print("z_p_logvar", z_p_logvar.shape) # decoder x_mean = variational_decoder(z_q) # # 3. Objective # # Our objective consists of two parts. First, the reconstruction error $RE$ between $x$ and $x_{reconstructed}$ and the KL divergence between the two probability distributions $Q(z|x)$ (the approximate posterior) and $P(z)$ (the prior). # # The reconstruction error $RE$ is simply the sigmoid cross entropy loss, which is related to the negative log loss. # # For the KL divergence, we assume that our prior $P(z)$ follows a multivariate standard normal distribution, and our approximate posterior $Q(x|z)$ follows a diogonal normal standard distribution. 
To be able to calculate the KL divergence, we first need to calculate the natural logarithm $ln$ of the two probability density functions. # ## 3.2 ln_diagonal_normal # calculates the natrual logarithm of $ln$ of probability distribution following a multivariate diogonal normal distribution $$ln(\mathcal{N}_{diagonal}(z\,;\,\mu,\Sigma))$$. # # * $\Sigma \ldots$ is the coveriance matrix of size $[n,n]$ . Since we assume that $\Sigma$ is a diogonal matrix, we represent it as vector of size $n$. That is, it is one row of the matrix $z_q_logvar$ # * $\mu \ldots$ is the mean vector of size $n$, i.e. one row of the matrix $z_q_mean$. # * $z$ is our random variable vector of size $n$, in our case one row of the sampled posterior $z_q$ # # The probability density function for a multivariate Gaussian distribution is: # # $$ # \mathcal{N}_{diagonal} = \frac{1}{ (2\pi)^n * sqrt(det(\Sigma))} * \exp(-\frac{1}{2} (z-\mu)^T\,\Sigma^{-1}*(z-\mu)) # $$ # # Since $\Sigma$ is a diogonal matrix, there are two simplifications of this expression: # First, $$\Sigma^{-1} = 1/\Sigma$$, and the term in the $exp()$ function can then further be simplified to $$1/\Sigma \,(z-\mu)^2$$ # # This expression can further simplified by replacing it with a sum (column vector times row vector): # $$1/\Sigma \,(z-\mu)^2 = \sum_{i=0}^n \frac{1}{\sigma_i^2}(z_i-\mu_i)^2$$ # # # Second, the determinant of a diogonal matrix is simply the product of all elements, which allows us to express $$\frac{1}{sqrt(det(\Sigma))} = (\prod_{i=0}^n \sigma_i^2)^{-1/2} $$ # # Taking the logarithm $ln$ and pluggin in these simplicfications, we end up with: # # $$ # ln(\mathcal{N}_{diagonal}) = ln(\frac{1}{(2\pi)^n}) + ln((\prod_{i=0}^n \sigma_i^2)^{-1/2}) + ln(\exp(-\frac{1}{2} \sum_{i=0}^n \frac{1}{\sigma_i^2}(z_i-\mu_i)^2)) # $$ # # If we drop the first term $ln(\frac{1}{(2\pi)^n})$ because it cancels itself out in the KL divergence calculation, and evaluate the logarithm we will end up with: # # $$ # ln(\mathcal{N}_{diagonal}) = - \frac{1}{2} \sum_{i=0}^n ln(\sigma_i^2) - \frac{1}{2} \sum_{i=0}^n \frac{1}{\sigma_i^2}(z_i-\mu_i)^2) # $$ # # If we join the summation, we end up with: # # $$ # ln(\mathcal{N}_{diagonal}) = - \frac{1}{2} \sum_{i=0}^n ( ln(\sigma_i^2) + \frac{1}{\sigma_i^2}(z_i-\mu_i)^2) # $$ # # # In[ ]: # approximate posterior q(z|x) def ln_diagonal_normal(z, mean, log_sigma, axis=1): # [batch_size, 1] # our layer ouputs log_sigma. Therefor, we can use tf.exp(log_sigma) to get sigma. log_q_normal = -0.5 * ( log_sigma + tf.pow(z-mean,2) * tf.pow(tf.exp(log_sigma),-1) ) return tf.reduce_sum( log_q_normal, axis=axis ) #batc # ## 3.1 ln_vampprior # # * log_sum_exp https://hips.seas.harvard.edu/blog/2013/01/09/computing-log-sum-exp/ # # * here our $P(z)$ is a multivariate, gaussian mixture distribution, where $i$ is the element of our batch, and $K$ is the total number of components ('number_of_components'), $\mu_k$ and $\Sigma_k$ are the mean and the covariance matrix. As with the VAE, the covariance matrix is a diogonal matrix, or a vector of size 'z_dim'. # # $$ p(z_i) = \frac{1}{K} \sum_k \mathcal{N}(z_i;\mu_k, \Sigma_k)$$ # # * We want to calculate the diogonal log of each of the mixture components. To calculate this, we replicate $z_0$ to match the size rows $\mu$ and $\Sigma$, which is the number of components, and we replicate this process for all $z_i$ in the batch. We will end up with a $z$,$\mu$, and $\Sigma$ of the dimension [batch_size * num_components, z_dim]. 
The content of the array looks like this: # ``` # z_1 [mu_k=1, sigma_k=1] # first z for all K components # z_1 [mu_k=2, sigma_k=2] # .. # z_1 [mu_k=K, sigma_k=K] # z_2 [mu_k=1, sigma_k=1] # second z for all K components # .. # z_n [mu_k=K, sigma_k=K] # n^th z for all K components # ``` # * Then we calculate the diogonal log of a normal multivariate gaussian density function $\mathcal{N}(z_i;\mu_k, \Sigma_k)$ for all $i$ and $k$. After reshaping, our 'ln_components' has all the ln_components of one z example in one row. # # * We calculate the logsumexp for each example of the batch: # $$ln (\sum_{k=1}^K exp(ln(\mathcal{N}(z_i;\mu_k, \Sigma_k))))$$ # # * Finally, we subtract ln(K) from each row, because the gaussian mixture had ln(1/K * \sum ), which is $\ln(\sum) + (-ln(K)) # # # In[ ]: # the log loss of the vamp prior def ln_vampprior( z,# [batch_size,z_dim] mean, # [number_of_components, z_dim ] log_sigma): # [number_of_components, z_dim ] # for all z in batch, calculate the ln_diogonal_normal # reshape z to [batch_size*num_components, z_dim] z_tiled = tf.reshape(tf.tile(z, [1, number_of_components]), [-1, z_dim]) # reshape mu and log_sigma to [batch_size*num_components, z_dim] mean_tiled = tf.reshape(tf.tile(mean, [batch_size,1]), [-1, z_dim]) log_sigma_tiled = tf.reshape(tf.tile(log_sigma, [batch_size,1]), [-1, z_dim]) # calculate ln(N) for each component ln_components = ln_diagonal_normal( z_tiled, mean_tiled, log_sigma_tiled, axis=1 ) # => [batch_size*num_components, 1] ln_components_per_example = tf.reshape(ln_components, [-1, number_of_components]) # [batch_size, number_of_components] # calculate log_sum_exp for each ln_components_per_example, ln_sum_per_example = tf.reduce_logsumexp( ln_components_per_example , axis=1 ) ln_p_z = ln_sum_per_example - tf.log(tf.cast(number_of_components,tf.float32)) return ln_p_z # ## 3.3 KL Divergence # # This function calculates the KL divergence between two probability distributions. For details on the KL divergence see "Pattern Recognition in Machine Learning (PRML),CM Bishop, 2006, Section 1.2.2" # # There, Equation (1.3.5) states that, if we are given a finite number N of points drawn from the probability # distribution or probability density, then the expectation can be approximated as a finite sum over these points (for both continuous and discrete probabilities (PDF / PFM): # # $$ \mathbb{E}[f] \simeq \frac{1}{N} \sum_{n=1}^N f(x_n) $$ # # Then, in Section 1.6.1, Equation 1.113 we see that the KL divergence is given by: # # $$ KL( Q(z|x) || P(z) ) = - \int q(z) ln(\frac{p(z)}{q(z)})\,dz $$ # # If we evaluate ln in the fraction in the middle ($ln(\frac{a}{b})=ln(a)-ln(b)$), we end up with: # # $$ KL( Q(z|x) || P(z) ) = - \int q(z) [ln(p(z)) - ln(q(z))] \,dz $$ # # Note that we have swapped $Q$ with $P$ and that we use $z$ as random variable (instead of $x$). This can also be expressed as expectation. # # $$ KL( Q(z|x) || P(z) ) = - \mathbb{E}_{z \sim Q} [ln(p(z)) - ln(q(z))] $$ # # Which can be evaluated with equation (1.3.5) # # # Note that the constant term $ln(\frac{1}{ (2\pi)^n})$ was dropped because the occur in both of our probability distributions and then the cancel each other out. 
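# A standalone NumPy sketch (an illustration, not the notebook's own code) of the two
# log-densities used just below: the diagonal-Gaussian ln N(z; mu, diag(var)) and the
# VampPrior mixture ln p(z) = logsumexp_k ln N(z; mu_k, diag(var_k)) - ln K. Unlike the
# TensorFlow version, this keeps the -0.5*ln(2*pi) constant that cancels in the KL term.
import numpy as np

def np_ln_diag_normal(z, mean, logvar):                     # inputs broadcast over [..., z_dim]
    return -0.5 * np.sum(logvar + (z - mean) ** 2 / np.exp(logvar)
                         + np.log(2.0 * np.pi), axis=-1)

def np_ln_mixture(z, means, logvars):                       # z: [z_dim]; means, logvars: [K, z_dim]
    comps = np_ln_diag_normal(z[None, :], means, logvars)   # ln N(z; mu_k, var_k) for every component
    m = comps.max()                                         # numerically stable log-sum-exp
    return m + np.log(np.sum(np.exp(comps - m))) - np.log(len(means))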
# # # In[ ]: def kl_divergence(log_p, log_q): # [batch_size, 1] return tf_beta * -1.0 * ( log_p - log_q ) # perhaps use reduce mean # ## 3.4 Loss # In[ ]: with tf.variable_scope("TraingsLoss"): # Reconstruction loss RE # RE # log_Bernoulli # in jakups paper this is the negative log loss [batch_size, 1] re_per_example = tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(logits=x_mean, labels=x_input), axis=1) # KL # VAMPprior p(z) ln_p_z_per_example = ln_vampprior(z=z_q, mean=z_p_mean, log_sigma=z_p_logvar) # p_lambda_z in paper [batch_size, 1] # approximate posterior q(z|x) ln_q_z_per_example = ln_diagonal_normal(z=z_q, mean=z_q_mean, log_sigma=z_q_logvar) # [batch_size, 1] kl_per_example = kl_divergence(log_p=ln_p_z_per_example, log_q=ln_q_z_per_example) # [batch_size, 1] # total_loss total_loss = tf.reduce_mean( re_per_example + kl_per_example) # batch_size # # 4 Optimization Procedure # In[ ]: with tf.variable_scope("OptimizationProcedure"): # https://www.tensorflow.org/api_docs/python/tf/train/AdamOptimizer with tf.variable_scope("ObtainGradients"): params = tf.trainable_variables() gradients = tf.gradients( ys=total_loss, xs=params, ) grads_and_vars= zip(gradients, params) with tf.variable_scope("ApplyGradients"): adam_optimizer = tf.train.AdamOptimizer ( learning_rate=learning_rate, name='AdamGradientDescent' ) training_step = adam_optimizer.apply_gradients( grads_and_vars=grads_and_vars, global_step=tf_global_step, name="apply_gradients_op" ) # # 5 Training # # ## 5.1 Helper functions for vizualiation during training # In[ ]: if not os.path.exists('reconstruction/'): os.makedirs('reconstruction/') # plots samples in a square def plot_reconstruction( samples, epoch, size_x=3, size_y=3, name="reconstruction"): if not (size_x * size_y) == len(samples): # l = min(len(samples),50) size_x = int(np.sqrt(l)) size_y = int(np.sqrt(l)) samples = samples[0:(size_x*size_y)] fig = plt.figure(figsize=(size_x, size_y)) gs = gridspec.GridSpec(size_x, size_y) gs.update(wspace=0.05, hspace=0.05) for i, sample in enumerate(samples): ax = plt.subplot(gs[i]) plt.axis('off') ax.set_xticklabels([]) ax.set_yticklabels([]) ax.set_aspect('equal') plt.imshow(sample.reshape(28, 28), cmap='Greys_r') outfile= "reconstruction/%s_%0.4d.png"%(name, epoch) plt.savefig(outfile) plt.close(fig) try: # only in ipython notebook display(Image(filename=outfile)) except: pass def plot_latent_space(session, current_step, save_checkpoint_after_each_step): # generate images nx = ny = 20 x_values = np.linspace(-3, 3, nx) y_values = np.linspace(-3, 3, ny) z_mu_grid = [] canvas = np.empty((28*ny, 28*nx)) for yi in x_values: for xi in y_values: z_mu_grid.append([xi, yi]) z_mu_grid = np.array(z_mu_grid) sampled_images = session.run( fetches=[ tf.nn.sigmoid(x_mean), ], feed_dict={ z_q:z_mu_grid }, )[0] current = 0 for i, yi in enumerate(x_values): for k, xi in enumerate(y_values): sampled_image = sampled_images[current] canvas[(nx-i-1)*28:(nx-i)*28, k*28:(k+1)*28] = sampled_image.reshape(28, 28) current+=1 plt.figure(figsize=(8, 10)) Xi, Yi = np.meshgrid(x_values, y_values) plt.imshow(canvas, origin="upper", cmap="gray") plt.tight_layout() outfile = 'reconstruction/%04.d.png'%(current_step / save_checkpoint_after_each_step) plt.savefig(outfile) try: # only in ipython notebook display(Image(filename=outfile)) except: pass # ## 5.2 Execute Training # # ## TODO # * Clean up this code # * Move helper function to library, because it is not essential for understanding # * Add image visualization in here. 
# * Remove timeline and tracing # In[ ]: tf.set_random_seed(RANDOM_SEED) np.random.seed(RANDOM_SEED) with tf.Session() as session: logger.info("Random Seed: %0.3f"%session.run(tf.random_normal([1], mean=-1, stddev=4, seed=RANDOM_SEED))[0]) tf.summary.scalar("total_loss",tf.cast(total_loss, tf.float32)) # summary for loss tf.summary.scalar("KL",tf.cast(tf.reduce_mean(kl_per_example), tf.float32)) # summary for loss tf.summary.scalar("RE",tf.cast(tf.reduce_mean(re_per_example), tf.float32)) # summary for loss all_summaries = tf.summary.merge_all() summary_writer = tf.summary.FileWriter(graph_outdir, graph=session.graph) session.run([ tf.local_variables_initializer(), tf.global_variables_initializer(), ]) saver = tf.train.Saver(tf.global_variables()) # Saver logger.info("Started training...") #TODO move to graph new_beta_ph = tf.placeholder(tf.float32, shape=()) update_beta = tf_beta.assign(new_beta_ph) # debug output operations mean_kl = tf.reduce_mean(kl_per_example) mean_re = tf.reduce_mean(re_per_example) mean_lnp = tf.reduce_mean(ln_p_z_per_example) mean_lnq = tf.reduce_mean(ln_q_z_per_example) session.graph.finalize() # prevent nodes beeing added to graph st = time.time() # timing for current_step, current_batch, current_epoch in trainings_iterator: # warmup for KL divergence new_beta=min((current_epoch-1)/100.0, 1.0) session.run(update_beta, feed_dict={new_beta_ph:new_beta}) # get current batch trainings_batch = data[current_batch] # run trainings operation _, tr_loss, tr_summaries, tr_kl, tr_re, tr_lnp, tr_lnq, tr_r, tr_k, tr_p,tr_q, x_reconstructed = session.run( fetches=[ training_step, total_loss, all_summaries, mean_kl, mean_re, mean_lnp, mean_lnq, re_per_example, kl_per_example, ln_p_z_per_example, ln_q_z_per_example, x_mean, ], feed_dict={ x_input:trainings_batch }, ) # write summaries and metadata info to graph summary_writer.add_summary(tr_summaries, current_step) # print training progress every now and then if current_step % print_loss_after_each_step==0: logger.info ("~StepT:%0.2fs Epoch %0.4d,Step:%i, Loss:%0.2f, KL:%0.2f, RE:%0.2f, ln(p):%0.2f, ln(q):%0.2f b:%0.2f"%( (time.time()-st)/float(current_step),# current time per step current_epoch, current_step,tr_loss, tr_kl, tr_re, tr_lnp, tr_lnq,new_beta) ) # save checkpoints once in a while if current_step % save_checkpoint_after_each_step==0: logger.info ("Saving checkpoint %i"%(current_step / save_checkpoint_after_each_step)) saver.save(session, j(graph_outdir, model_name), global_step=tf_global_step) logger.info ("Plotting Components %i"%(current_step / save_checkpoint_after_each_step)) components = session.run(component_means) plot_reconstruction(components, current_epoch, name="components") logger.info ("Plotting Reconstruction %i"%(current_step / save_checkpoint_after_each_step)) plot_reconstruction(x_reconstructed, current_epoch) if z_dim == 2: logger.info ("Plotting Latent Space %i"%(current_step / save_checkpoint_after_each_step)) plot_latent_space( session,current_step, save_checkpoint_after_each_step) # In[ ]:
stefanthaler/tf-spikes
vampprior/train_vampprior.py
Python
apache-2.0
29,403
[ "Gaussian" ]
3036bd1b81d7036ad15e9d5916b4e8e4cdc5b850e43b69320af096ddbdd98e12
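A minimal NumPy sketch (illustration only, with made-up shapes) of the reparametrization trick the notebook above describes: instead of sampling z ~ N(mu, diag(var)) directly, draw eps ~ N(0, I) and shift it, z = mu + eps * sqrt(var), so gradients can flow back into mu and logvar while the randomness stays in eps:

import numpy as np

rng = np.random.RandomState(0)
batch_size, z_dim = 4, 2                     # hypothetical sizes
mu = np.zeros((batch_size, z_dim))
logvar = np.log(np.full((batch_size, z_dim), 0.25))
eps = rng.standard_normal((batch_size, z_dim))
z = mu + eps * np.exp(0.5 * logvar)          # std = exp(0.5 * ln(var)) = sqrt(var)
assert z.shape == (batch_size, z_dim)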
import pickle import pymongo import unittest import warnings from datetime import datetime import pymongo import pickle import weakref from fixtures import Base, Mixin, PickleEmbedded, PickleTest from mongoengine import * from mongoengine.base import _document_registry, NotRegistered, InvalidDocumentError from mongoengine.connection import _get_db class DocumentTest(unittest.TestCase): def setUp(self): connect(db='mongoenginetest') self.db = _get_db() class Person(Document): name = StringField() age = IntField() self.Person = Person def tearDown(self): self.Person.drop_collection() def test_drop_collection(self): """Ensure that the collection may be dropped from the database. """ self.Person(name='Test').save() collection = self.Person._get_collection_name() self.assertTrue(collection in self.db.collection_names()) self.Person.drop_collection() self.assertFalse(collection in self.db.collection_names()) def test_definition(self): """Ensure that document may be defined using fields. """ name_field = StringField() age_field = IntField() class Person(Document): name = name_field age = age_field non_field = True self.assertEqual(Person._fields['name'], name_field) self.assertEqual(Person._fields['age'], age_field) self.assertFalse('non_field' in Person._fields) self.assertTrue('id' in Person._fields) # Test iteration over fields fields = list(Person()) self.assertTrue('name' in fields and 'age' in fields) # Ensure Document isn't treated like an actual document self.assertFalse(hasattr(Document, '_fields')) def test_collection_name(self): """Ensure that a collection with a specified name may be used. """ class DefaultNamingTest(Document): pass self.assertEquals('default_naming_test', DefaultNamingTest._get_collection_name()) class CustomNamingTest(Document): meta = {'collection': 'pimp_my_collection'} self.assertEquals('pimp_my_collection', CustomNamingTest._get_collection_name()) class DynamicNamingTest(Document): meta = {'collection': lambda c: "DYNAMO"} self.assertEquals('DYNAMO', DynamicNamingTest._get_collection_name()) # Use Abstract class to handle backwards compatibility class BaseDocument(Document): meta = { 'abstract': True, 'collection': lambda c: c.__name__.lower() } class OldNamingConvention(BaseDocument): pass self.assertEquals('oldnamingconvention', OldNamingConvention._get_collection_name()) class InheritedAbstractNamingTest(BaseDocument): meta = {'collection': 'wibble'} self.assertEquals('wibble', InheritedAbstractNamingTest._get_collection_name()) with warnings.catch_warnings(record=True) as w: # Cause all warnings to always be triggered. warnings.simplefilter("always") class NonAbstractBase(Document): pass class InheritedDocumentFailTest(NonAbstractBase): meta = {'collection': 'fail'} self.assertTrue(issubclass(w[0].category, SyntaxWarning)) self.assertEquals('non_abstract_base', InheritedDocumentFailTest._get_collection_name()) # Mixin tests class BaseMixin(object): meta = { 'collection': lambda c: c.__name__.lower() } class OldMixinNamingConvention(Document, BaseMixin): pass self.assertEquals('oldmixinnamingconvention', OldMixinNamingConvention._get_collection_name()) class BaseMixin(object): meta = { 'collection': lambda c: c.__name__.lower() } class BaseDocument(Document, BaseMixin): pass class MyDocument(BaseDocument): pass self.assertEquals('mydocument', OldMixinNamingConvention._get_collection_name()) def test_get_superclasses(self): """Ensure that the correct list of superclasses is assembled. 
""" class Animal(Document): pass class Fish(Animal): pass class Mammal(Animal): pass class Human(Mammal): pass class Dog(Mammal): pass mammal_superclasses = {'Animal': Animal} self.assertEqual(Mammal._superclasses, mammal_superclasses) dog_superclasses = { 'Animal': Animal, 'Animal.Mammal': Mammal, } self.assertEqual(Dog._superclasses, dog_superclasses) def test_get_subclasses(self): """Ensure that the correct list of subclasses is retrieved by the _get_subclasses method. """ class Animal(Document): pass class Fish(Animal): pass class Mammal(Animal): pass class Human(Mammal): pass class Dog(Mammal): pass mammal_subclasses = { 'Animal.Mammal.Dog': Dog, 'Animal.Mammal.Human': Human } self.assertEqual(Mammal._get_subclasses(), mammal_subclasses) animal_subclasses = { 'Animal.Fish': Fish, 'Animal.Mammal': Mammal, 'Animal.Mammal.Dog': Dog, 'Animal.Mammal.Human': Human } self.assertEqual(Animal._get_subclasses(), animal_subclasses) def test_external_super_and_sub_classes(self): """Ensure that the correct list of sub and super classes is assembled. when importing part of the model """ class Animal(Base): pass class Fish(Animal): pass class Mammal(Animal): pass class Human(Mammal): pass class Dog(Mammal): pass mammal_superclasses = {'Base': Base, 'Base.Animal': Animal} self.assertEqual(Mammal._superclasses, mammal_superclasses) dog_superclasses = { 'Base': Base, 'Base.Animal': Animal, 'Base.Animal.Mammal': Mammal, } self.assertEqual(Dog._superclasses, dog_superclasses) animal_subclasses = { 'Base.Animal.Fish': Fish, 'Base.Animal.Mammal': Mammal, 'Base.Animal.Mammal.Dog': Dog, 'Base.Animal.Mammal.Human': Human } self.assertEqual(Animal._get_subclasses(), animal_subclasses) mammal_subclasses = { 'Base.Animal.Mammal.Dog': Dog, 'Base.Animal.Mammal.Human': Human } self.assertEqual(Mammal._get_subclasses(), mammal_subclasses) Base.drop_collection() h = Human() h.save() self.assertEquals(Human.objects.count(), 1) self.assertEquals(Mammal.objects.count(), 1) self.assertEquals(Animal.objects.count(), 1) self.assertEquals(Base.objects.count(), 1) Base.drop_collection() def test_polymorphic_queries(self): """Ensure that the correct subclasses are returned from a query""" class Animal(Document): pass class Fish(Animal): pass class Mammal(Animal): pass class Human(Mammal): pass class Dog(Mammal): pass Animal.drop_collection() Animal().save() Fish().save() Mammal().save() Human().save() Dog().save() classes = [obj.__class__ for obj in Animal.objects] self.assertEqual(classes, [Animal, Fish, Mammal, Human, Dog]) classes = [obj.__class__ for obj in Mammal.objects] self.assertEqual(classes, [Mammal, Human, Dog]) classes = [obj.__class__ for obj in Human.objects] self.assertEqual(classes, [Human]) Animal.drop_collection() def test_polymorphic_references(self): """Ensure that the correct subclasses are returned from a query when using references / generic references """ class Animal(Document): pass class Fish(Animal): pass class Mammal(Animal): pass class Human(Mammal): pass class Dog(Mammal): pass class Zoo(Document): animals = ListField(ReferenceField(Animal)) Zoo.drop_collection() Animal.drop_collection() Animal().save() Fish().save() Mammal().save() Human().save() Dog().save() # Save a reference to each animal zoo = Zoo(animals=Animal.objects) zoo.save() zoo.reload() classes = [a.__class__ for a in Zoo.objects.first().animals] self.assertEqual(classes, [Animal, Fish, Mammal, Human, Dog]) Zoo.drop_collection() class Zoo(Document): animals = ListField(GenericReferenceField(Animal)) # Save a reference to each 
animal zoo = Zoo(animals=Animal.objects) zoo.save() zoo.reload() classes = [a.__class__ for a in Zoo.objects.first().animals] self.assertEqual(classes, [Animal, Fish, Mammal, Human, Dog]) Zoo.drop_collection() Animal.drop_collection() def test_reference_inheritance(self): class Stats(Document): created = DateTimeField(default=datetime.now) meta = {'allow_inheritance': False} class CompareStats(Document): generated = DateTimeField(default=datetime.now) stats = ListField(ReferenceField(Stats)) Stats.drop_collection() CompareStats.drop_collection() list_stats = [] for i in xrange(10): s = Stats() s.save() list_stats.append(s) cmp_stats = CompareStats(stats=list_stats) cmp_stats.save() self.assertEqual(list_stats, CompareStats.objects.first().stats) def test_inheritance(self): """Ensure that document may inherit fields from a superclass document. """ class Employee(self.Person): salary = IntField() self.assertTrue('name' in Employee._fields) self.assertTrue('salary' in Employee._fields) self.assertEqual(Employee._get_collection_name(), self.Person._get_collection_name()) # Ensure that MRO error is not raised class A(Document): pass class B(A): pass class C(B): pass def test_allow_inheritance(self): """Ensure that inheritance may be disabled on simple classes and that _cls and _types will not be used. """ class Animal(Document): name = StringField() meta = {'allow_inheritance': False} Animal.drop_collection() def create_dog_class(): class Dog(Animal): pass self.assertRaises(ValueError, create_dog_class) # Check that _cls etc aren't present on simple documents dog = Animal(name='dog') dog.save() collection = self.db[Animal._get_collection_name()] obj = collection.find_one() self.assertFalse('_cls' in obj) self.assertFalse('_types' in obj) Animal.drop_collection() def create_employee_class(): class Employee(self.Person): meta = {'allow_inheritance': False} self.assertRaises(ValueError, create_employee_class) # Test the same for embedded documents class Comment(EmbeddedDocument): content = StringField() meta = {'allow_inheritance': False} def create_special_comment(): class SpecialComment(Comment): pass self.assertRaises(ValueError, create_special_comment) comment = Comment(content='test') self.assertFalse('_cls' in comment.to_mongo()) self.assertFalse('_types' in comment.to_mongo()) def test_allow_inheritance_abstract_document(self): """Ensure that abstract documents can set inheritance rules and that _cls and _types will not be used. """ class FinalDocument(Document): meta = {'abstract': True, 'allow_inheritance': False} class Animal(FinalDocument): name = StringField() Animal.drop_collection() def create_dog_class(): class Dog(Animal): pass self.assertRaises(ValueError, create_dog_class) # Check that _cls etc aren't present on simple documents dog = Animal(name='dog') dog.save() collection = self.db[Animal._get_collection_name()] obj = collection.find_one() self.assertFalse('_cls' in obj) self.assertFalse('_types' in obj) Animal.drop_collection() def test_how_to_turn_off_inheritance(self): """Demonstrates migrating from allow_inheritance = True to False. 
""" class Animal(Document): name = StringField() meta = { 'indexes': ['name'] } Animal.drop_collection() dog = Animal(name='dog') dog.save() collection = self.db[Animal._get_collection_name()] obj = collection.find_one() self.assertTrue('_cls' in obj) self.assertTrue('_types' in obj) info = collection.index_information() info = [value['key'] for key, value in info.iteritems()] self.assertEquals([[(u'_id', 1)], [(u'_types', 1), (u'name', 1)]], info) # Turn off inheritance class Animal(Document): name = StringField() meta = { 'allow_inheritance': False, 'indexes': ['name'] } collection.update({}, {"$unset": {"_types": 1, "_cls": 1}}, multi=True) # Confirm extra data is removed obj = collection.find_one() self.assertFalse('_cls' in obj) self.assertFalse('_types' in obj) info = collection.index_information() info = [value['key'] for key, value in info.iteritems()] self.assertEquals([[(u'_id', 1)], [(u'_types', 1), (u'name', 1)]], info) info = collection.index_information() indexes_to_drop = [key for key, value in info.iteritems() if '_types' in dict(value['key'])] for index in indexes_to_drop: collection.drop_index(index) info = collection.index_information() info = [value['key'] for key, value in info.iteritems()] self.assertEquals([[(u'_id', 1)]], info) # Recreate indexes dog = Animal.objects.first() dog.save() info = collection.index_information() info = [value['key'] for key, value in info.iteritems()] self.assertEquals([[(u'_id', 1)], [(u'name', 1),]], info) Animal.drop_collection() def test_abstract_documents(self): """Ensure that a document superclass can be marked as abstract thereby not using it as the name for the collection.""" class Animal(Document): name = StringField() meta = {'abstract': True} class Fish(Animal): pass class Guppy(Fish): pass class Mammal(Animal): meta = {'abstract': True} class Human(Mammal): pass self.assertFalse('collection' in Animal._meta) self.assertFalse('collection' in Mammal._meta) self.assertEqual(Animal._get_collection_name(), None) self.assertEqual(Mammal._get_collection_name(), None) self.assertEqual(Fish._get_collection_name(), 'fish') self.assertEqual(Guppy._get_collection_name(), 'fish') self.assertEqual(Human._get_collection_name(), 'human') def create_bad_abstract(): class EvilHuman(Human): evil = BooleanField(default=True) meta = {'abstract': True} self.assertRaises(ValueError, create_bad_abstract) def test_collection_name(self): """Ensure that a collection with a specified name may be used. """ collection = 'personCollTest' if collection in self.db.collection_names(): self.db.drop_collection(collection) class Person(Document): name = StringField() meta = {'collection': collection} user = Person(name="Test User") user.save() self.assertTrue(collection in self.db.collection_names()) user_obj = self.db[collection].find_one() self.assertEqual(user_obj['name'], "Test User") user_obj = Person.objects[0] self.assertEqual(user_obj.name, "Test User") Person.drop_collection() self.assertFalse(collection in self.db.collection_names()) def test_collection_name_and_primary(self): """Ensure that a collection with a specified name may be used. """ class Person(Document): name = StringField(primary_key=True) meta = {'collection': 'app'} user = Person(name="Test User") user.save() user_obj = Person.objects[0] self.assertEqual(user_obj.name, "Test User") Person.drop_collection() def test_inherited_collections(self): """Ensure that subclassed documents don't override parents' collections. 
""" with warnings.catch_warnings(record=True) as w: # Cause all warnings to always be triggered. warnings.simplefilter("always") class Drink(Document): name = StringField() class AlcoholicDrink(Drink): meta = {'collection': 'booze'} class Drinker(Document): drink = GenericReferenceField() # Confirm we triggered a SyntaxWarning assert issubclass(w[0].category, SyntaxWarning) Drink.drop_collection() AlcoholicDrink.drop_collection() Drinker.drop_collection() red_bull = Drink(name='Red Bull') red_bull.save() programmer = Drinker(drink=red_bull) programmer.save() beer = AlcoholicDrink(name='Beer') beer.save() real_person = Drinker(drink=beer) real_person.save() self.assertEqual(Drinker.objects[0].drink.name, red_bull.name) self.assertEqual(Drinker.objects[1].drink.name, beer.name) def test_capped_collection(self): """Ensure that capped collections work properly. """ class Log(Document): date = DateTimeField(default=datetime.now) meta = { 'max_documents': 10, 'max_size': 90000, } Log.drop_collection() # Ensure that the collection handles up to its maximum for i in range(10): Log().save() self.assertEqual(len(Log.objects), 10) # Check that extra documents don't increase the size Log().save() self.assertEqual(len(Log.objects), 10) options = Log.objects._collection.options() self.assertEqual(options['capped'], True) self.assertEqual(options['max'], 10) self.assertEqual(options['size'], 90000) # Check that the document cannot be redefined with different options def recreate_log_document(): class Log(Document): date = DateTimeField(default=datetime.now) meta = { 'max_documents': 11, } # Create the collection by accessing Document.objects Log.objects self.assertRaises(InvalidCollectionError, recreate_log_document) Log.drop_collection() def test_indexes(self): """Ensure that indexes are used when meta[indexes] is specified. """ class BlogPost(Document): date = DateTimeField(db_field='addDate', default=datetime.now) category = StringField() tags = ListField(StringField()) meta = { 'indexes': [ '-date', 'tags', ('category', '-date') ], } BlogPost.drop_collection() info = BlogPost.objects._collection.index_information() # _id, '-date', 'tags', ('cat', 'date') # NB: there is no index on _types by itself, since # the indices on -date and tags will both contain # _types as first element in the key self.assertEqual(len(info), 4) # Indexes are lazy so use list() to perform query list(BlogPost.objects) info = BlogPost.objects._collection.index_information() info = [value['key'] for key, value in info.iteritems()] self.assertTrue([('_types', 1), ('category', 1), ('addDate', -1)] in info) self.assertTrue([('_types', 1), ('addDate', -1)] in info) # tags is a list field so it shouldn't have _types in the index self.assertTrue([('tags', 1)] in info) class ExtendedBlogPost(BlogPost): title = StringField() meta = {'indexes': ['title']} BlogPost.drop_collection() list(ExtendedBlogPost.objects) info = ExtendedBlogPost.objects._collection.index_information() info = [value['key'] for key, value in info.iteritems()] self.assertTrue([('_types', 1), ('category', 1), ('addDate', -1)] in info) self.assertTrue([('_types', 1), ('addDate', -1)] in info) self.assertTrue([('_types', 1), ('title', 1)] in info) BlogPost.drop_collection() def test_dictionary_indexes(self): """Ensure that indexes are used when meta[indexes] contains dictionaries instead of lists. 
""" class BlogPost(Document): date = DateTimeField(db_field='addDate', default=datetime.now) category = StringField() tags = ListField(StringField()) meta = { 'indexes': [ { 'fields': ['-date'], 'unique': True, 'sparse': True, 'types': False }, ], } BlogPost.drop_collection() info = BlogPost.objects._collection.index_information() # _id, '-date' self.assertEqual(len(info), 3) # Indexes are lazy so use list() to perform query list(BlogPost.objects) info = BlogPost.objects._collection.index_information() info = [(value['key'], value.get('unique', False), value.get('sparse', False)) for key, value in info.iteritems()] self.assertTrue(([('addDate', -1)], True, True) in info) BlogPost.drop_collection() def test_embedded_document_index(self): """Tests settings an index on an embedded document """ class Date(EmbeddedDocument): year = IntField(db_field='yr') class BlogPost(Document): title = StringField() date = EmbeddedDocumentField(Date) meta = { 'indexes': [ '-date.year' ], } BlogPost.drop_collection() info = BlogPost.objects._collection.index_information() self.assertEqual(info.keys(), ['_types_1_date.yr_-1', '_id_']) BlogPost.drop_collection() def test_list_embedded_document_index(self): """Ensure list embedded documents can be indexed """ class Tag(EmbeddedDocument): name = StringField(db_field='tag') class BlogPost(Document): title = StringField() tags = ListField(EmbeddedDocumentField(Tag)) meta = { 'indexes': [ 'tags.name' ], } BlogPost.drop_collection() info = BlogPost.objects._collection.index_information() # we don't use _types in with list fields by default self.assertEqual(info.keys(), ['_id_', '_types_1', 'tags.tag_1']) post1 = BlogPost(title="Embedded Indexes tests in place", tags=[Tag(name="about"), Tag(name="time")] ) post1.save() BlogPost.drop_collection() def test_geo_indexes_recursion(self): class User(Document): channel = ReferenceField('Channel') location = GeoPointField() class Channel(Document): user = ReferenceField('User') location = GeoPointField() self.assertEquals(len(User._geo_indices()), 2) def test_hint(self): class BlogPost(Document): tags = ListField(StringField()) meta = { 'indexes': [ 'tags', ], } BlogPost.drop_collection() for i in xrange(0, 10): tags = [("tag %i" % n) for n in xrange(0, i % 2)] BlogPost(tags=tags).save() self.assertEquals(BlogPost.objects.count(), 10) self.assertEquals(BlogPost.objects.hint().count(), 10) self.assertEquals(BlogPost.objects.hint([('tags', 1)]).count(), 10) self.assertEquals(BlogPost.objects.hint([('ZZ', 1)]).count(), 10) def invalid_index(): BlogPost.objects.hint('tags') self.assertRaises(TypeError, invalid_index) def invalid_index_2(): return BlogPost.objects.hint(('tags', 1)) self.assertRaises(TypeError, invalid_index_2) def test_unique(self): """Ensure that uniqueness constraints are applied to fields. """ class BlogPost(Document): title = StringField() slug = StringField(unique=True) BlogPost.drop_collection() post1 = BlogPost(title='test1', slug='test') post1.save() # Two posts with the same slug is not allowed post2 = BlogPost(title='test2', slug='test') self.assertRaises(OperationError, post2.save) def test_unique_with(self): """Ensure that unique_with constraints are applied to fields. 
""" class Date(EmbeddedDocument): year = IntField(db_field='yr') class BlogPost(Document): title = StringField() date = EmbeddedDocumentField(Date) slug = StringField(unique_with='date.year') BlogPost.drop_collection() post1 = BlogPost(title='test1', date=Date(year=2009), slug='test') post1.save() # day is different so won't raise exception post2 = BlogPost(title='test2', date=Date(year=2010), slug='test') post2.save() # Now there will be two docs with the same slug and the same day: fail post3 = BlogPost(title='test3', date=Date(year=2010), slug='test') self.assertRaises(OperationError, post3.save) BlogPost.drop_collection() def test_unique_embedded_document(self): """Ensure that uniqueness constraints are applied to fields on embedded documents. """ class SubDocument(EmbeddedDocument): year = IntField(db_field='yr') slug = StringField(unique=True) class BlogPost(Document): title = StringField() sub = EmbeddedDocumentField(SubDocument) BlogPost.drop_collection() post1 = BlogPost(title='test1', sub=SubDocument(year=2009, slug="test")) post1.save() # sub.slug is different so won't raise exception post2 = BlogPost(title='test2', sub=SubDocument(year=2010, slug='another-slug')) post2.save() # Now there will be two docs with the same sub.slug post3 = BlogPost(title='test3', sub=SubDocument(year=2010, slug='test')) self.assertRaises(OperationError, post3.save) BlogPost.drop_collection() def test_unique_with_embedded_document_and_embedded_unique(self): """Ensure that uniqueness constraints are applied to fields on embedded documents. And work with unique_with as well. """ class SubDocument(EmbeddedDocument): year = IntField(db_field='yr') slug = StringField(unique=True) class BlogPost(Document): title = StringField(unique_with='sub.year') sub = EmbeddedDocumentField(SubDocument) BlogPost.drop_collection() post1 = BlogPost(title='test1', sub=SubDocument(year=2009, slug="test")) post1.save() # sub.slug is different so won't raise exception post2 = BlogPost(title='test2', sub=SubDocument(year=2010, slug='another-slug')) post2.save() # Now there will be two docs with the same sub.slug post3 = BlogPost(title='test3', sub=SubDocument(year=2010, slug='test')) self.assertRaises(OperationError, post3.save) # Now there will be two docs with the same title and year post3 = BlogPost(title='test1', sub=SubDocument(year=2009, slug='test-1')) self.assertRaises(OperationError, post3.save) BlogPost.drop_collection() def test_unique_and_indexes(self): """Ensure that 'unique' constraints aren't overridden by meta.indexes. """ class Customer(Document): cust_id = IntField(unique=True, required=True) meta = { 'indexes': ['cust_id'], 'allow_inheritance': False, } Customer.drop_collection() cust = Customer(cust_id=1) cust.save() cust_dupe = Customer(cust_id=1) try: cust_dupe.save() raise AssertionError, "We saved a dupe!" except OperationError: pass Customer.drop_collection() def test_unique_and_primary(self): """If you set a field as primary, then unexpected behaviour can occur. You won't create a duplicate but you will update an existing document. """ class User(Document): name = StringField(primary_key=True, unique=True) password = StringField() User.drop_collection() user = User(name='huangz', password='secret') user.save() user = User(name='huangz', password='secret2') user.save() self.assertEqual(User.objects.count(), 1) self.assertEqual(User.objects.get().password, 'secret2') User.drop_collection() def test_custom_id_field(self): """Ensure that documents may be created with custom primary keys. 
""" class User(Document): username = StringField(primary_key=True) name = StringField() User.drop_collection() self.assertEqual(User._fields['username'].db_field, '_id') self.assertEqual(User._meta['id_field'], 'username') def create_invalid_user(): User(name='test').save() # no primary key field self.assertRaises(ValidationError, create_invalid_user) def define_invalid_user(): class EmailUser(User): email = StringField(primary_key=True) self.assertRaises(ValueError, define_invalid_user) class EmailUser(User): email = StringField() user = User(username='test', name='test user') user.save() user_obj = User.objects.first() self.assertEqual(user_obj.id, 'test') self.assertEqual(user_obj.pk, 'test') user_son = User.objects._collection.find_one() self.assertEqual(user_son['_id'], 'test') self.assertTrue('username' not in user_son['_id']) User.drop_collection() user = User(pk='mongo', name='mongo user') user.save() user_obj = User.objects.first() self.assertEqual(user_obj.id, 'mongo') self.assertEqual(user_obj.pk, 'mongo') user_son = User.objects._collection.find_one() self.assertEqual(user_son['_id'], 'mongo') self.assertTrue('username' not in user_son['_id']) User.drop_collection() def test_document_not_registered(self): class Place(Document): name = StringField() class NicePlace(Place): pass Place.drop_collection() Place(name="London").save() NicePlace(name="Buckingham Palace").save() # Mimic Place and NicePlace definitions being in a different file # and the NicePlace model not being imported in at query time. @classmethod def _get_subclasses(cls): return {} Place._get_subclasses = _get_subclasses def query_without_importing_nice_place(): print Place.objects.all() self.assertRaises(NotRegistered, query_without_importing_nice_place) def test_creation(self): """Ensure that document may be created using keyword arguments. """ person = self.Person(name="Test User", age=30) self.assertEqual(person.name, "Test User") self.assertEqual(person.age, 30) def test_to_dbref(self): """Ensure that you can get a dbref of a document""" person = self.Person(name="Test User", age=30) self.assertRaises(OperationError, person.to_dbref) person.save() person.to_dbref() def test_reload(self): """Ensure that attributes may be reloaded. 
""" person = self.Person(name="Test User", age=20) person.save() person_obj = self.Person.objects.first() person_obj.name = "Mr Test User" person_obj.age = 21 person_obj.save() self.assertEqual(person.name, "Test User") self.assertEqual(person.age, 20) person.reload() self.assertEqual(person.name, "Mr Test User") self.assertEqual(person.age, 21) def test_reload_referencing(self): """Ensures reloading updates weakrefs correctly """ class Embedded(EmbeddedDocument): dict_field = DictField() list_field = ListField() class Doc(Document): dict_field = DictField() list_field = ListField() embedded_field = EmbeddedDocumentField(Embedded) Doc.drop_collection() doc = Doc() doc.dict_field = {'hello': 'world'} doc.list_field = ['1', 2, {'hello': 'world'}] embedded_1 = Embedded() embedded_1.dict_field = {'hello': 'world'} embedded_1.list_field = ['1', 2, {'hello': 'world'}] doc.embedded_field = embedded_1 doc.save() doc.reload() doc.list_field.append(1) doc.dict_field['woot'] = "woot" doc.embedded_field.list_field.append(1) doc.embedded_field.dict_field['woot'] = "woot" self.assertEquals(doc._get_changed_fields(), [ 'list_field', 'dict_field', 'embedded_field.list_field', 'embedded_field.dict_field']) doc.save() doc.reload() self.assertEquals(doc._get_changed_fields(), []) self.assertEquals(len(doc.list_field), 4) self.assertEquals(len(doc.dict_field), 2) self.assertEquals(len(doc.embedded_field.list_field), 4) self.assertEquals(len(doc.embedded_field.dict_field), 2) def test_dictionary_access(self): """Ensure that dictionary-style field access works properly. """ person = self.Person(name='Test User', age=30) self.assertEquals(person['name'], 'Test User') self.assertRaises(KeyError, person.__getitem__, 'salary') self.assertRaises(KeyError, person.__setitem__, 'salary', 50) person['name'] = 'Another User' self.assertEquals(person['name'], 'Another User') # Length = length(assigned fields + id) self.assertEquals(len(person), 3) self.assertTrue('age' in person) person.age = None self.assertFalse('age' in person) self.assertFalse('nationality' in person) def test_embedded_document(self): """Ensure that embedded documents are set up correctly. """ class Comment(EmbeddedDocument): content = StringField() self.assertTrue('content' in Comment._fields) self.assertFalse('id' in Comment._fields) self.assertFalse('collection' in Comment._meta) def test_embedded_document_validation(self): """Ensure that embedded documents may be validated. """ class Comment(EmbeddedDocument): date = DateTimeField() content = StringField(required=True) comment = Comment() self.assertRaises(ValidationError, comment.validate) comment.content = 'test' comment.validate() comment.date = 4 self.assertRaises(ValidationError, comment.validate) comment.date = datetime.now() comment.validate() def test_save(self): """Ensure that a document may be saved in the database. 
""" # Create person object and save it to the database person = self.Person(name='Test User', age=30) person.save() # Ensure that the object is in the database collection = self.db[self.Person._get_collection_name()] person_obj = collection.find_one({'name': 'Test User'}) self.assertEqual(person_obj['name'], 'Test User') self.assertEqual(person_obj['age'], 30) self.assertEqual(person_obj['_id'], person.id) # Test skipping validation on save class Recipient(Document): email = EmailField(required=True) recipient = Recipient(email='root@localhost') self.assertRaises(ValidationError, recipient.save) try: recipient.save(validate=False) except ValidationError: self.fail() def test_save_to_a_value_that_equates_to_false(self): class Thing(EmbeddedDocument): count = IntField() class User(Document): thing = EmbeddedDocumentField(Thing) User.drop_collection() user = User(thing=Thing(count=1)) user.save() user.reload() user.thing.count = 0 user.save() user.reload() self.assertEquals(user.thing.count, 0) def test_save_max_recursion_not_hit(self): class Person(Document): name = StringField() parent = ReferenceField('self') friend = ReferenceField('self') Person.drop_collection() p1 = Person(name="Wilson Snr") p1.parent = None p1.save() p2 = Person(name="Wilson Jr") p2.parent = p1 p2.save() p1.friend = p2 p1.save() # Confirm can save and it resets the changed fields without hitting # max recursion error p0 = Person.objects.first() p0.name = 'wpjunior' p0.save() def test_save_cascades(self): class Person(Document): name = StringField() parent = ReferenceField('self') Person.drop_collection() p1 = Person(name="Wilson Snr") p1.parent = None p1.save() p2 = Person(name="Wilson Jr") p2.parent = p1 p2.save() p = Person.objects(name="Wilson Jr").get() p.parent.name = "Daddy Wilson" p.save() p1.reload() self.assertEquals(p1.name, p.parent.name) def test_save_cascades_generically(self): class Person(Document): name = StringField() parent = GenericReferenceField() Person.drop_collection() p1 = Person(name="Wilson Snr") p1.save() p2 = Person(name="Wilson Jr") p2.parent = p1 p2.save() p = Person.objects(name="Wilson Jr").get() p.parent.name = "Daddy Wilson" p.save() p1.reload() self.assertEquals(p1.name, p.parent.name) def test_update(self): """Ensure that an existing document is updated instead of be overwritten. 
""" # Create person object and save it to the database person = self.Person(name='Test User', age=30) person.save() # Create same person object, with same id, without age same_person = self.Person(name='Test') same_person.id = person.id same_person.save() # Confirm only one object self.assertEquals(self.Person.objects.count(), 1) # reload person.reload() same_person.reload() # Confirm the same self.assertEqual(person, same_person) self.assertEqual(person.name, same_person.name) self.assertEqual(person.age, same_person.age) # Confirm the saved values self.assertEqual(person.name, 'Test') self.assertEqual(person.age, 30) # Test only / exclude only updates included fields person = self.Person.objects.only('name').get() person.name = 'User' person.save() person.reload() self.assertEqual(person.name, 'User') self.assertEqual(person.age, 30) # test exclude only updates set fields person = self.Person.objects.exclude('name').get() person.age = 21 person.save() person.reload() self.assertEqual(person.name, 'User') self.assertEqual(person.age, 21) # Test only / exclude can set non excluded / included fields person = self.Person.objects.only('name').get() person.name = 'Test' person.age = 30 person.save() person.reload() self.assertEqual(person.name, 'Test') self.assertEqual(person.age, 30) # test exclude only updates set fields person = self.Person.objects.exclude('name').get() person.name = 'User' person.age = 21 person.save() person.reload() self.assertEqual(person.name, 'User') self.assertEqual(person.age, 21) # Confirm does remove unrequired fields person = self.Person.objects.exclude('name').get() person.age = None person.save() person.reload() self.assertEqual(person.name, 'User') self.assertEqual(person.age, None) person = self.Person.objects.get() person.name = None person.age = None person.save() person.reload() self.assertEqual(person.name, None) self.assertEqual(person.age, None) def test_document_update(self): def update_not_saved_raises(): person = self.Person(name='dcrosta') person.update(set__name='Dan Crosta') self.assertRaises(OperationError, update_not_saved_raises) author = self.Person(name='dcrosta') author.save() author.update(set__name='Dan Crosta') author.reload() p1 = self.Person.objects.first() self.assertEquals(p1.name, author.name) def update_no_value_raises(): person = self.Person.objects.first() person.update() self.assertRaises(OperationError, update_no_value_raises) def test_embedded_update(self): """ Test update on `EmbeddedDocumentField` fields """ class Page(EmbeddedDocument): log_message = StringField(verbose_name="Log message", required=True) class Site(Document): page = EmbeddedDocumentField(Page) Site.drop_collection() site = Site(page=Page(log_message="Warning: Dummy message")) site.save() # Update site = Site.objects.first() site.page.log_message = "Error: Dummy message" site.save() site = Site.objects.first() self.assertEqual(site.page.log_message, "Error: Dummy message") def test_embedded_update_db_field(self): """ Test update on `EmbeddedDocumentField` fields when db_field is other than default. 
""" class Page(EmbeddedDocument): log_message = StringField(verbose_name="Log message", db_field="page_log_message", required=True) class Site(Document): page = EmbeddedDocumentField(Page) Site.drop_collection() site = Site(page=Page(log_message="Warning: Dummy message")) site.save() # Update site = Site.objects.first() site.page.log_message = "Error: Dummy message" site.save() site = Site.objects.first() self.assertEqual(site.page.log_message, "Error: Dummy message") def test_delta(self): class Doc(Document): string_field = StringField() int_field = IntField() dict_field = DictField() list_field = ListField() Doc.drop_collection() doc = Doc() doc.save() doc = Doc.objects.first() self.assertEquals(doc._get_changed_fields(), []) self.assertEquals(doc._delta(), ({}, {})) doc.string_field = 'hello' self.assertEquals(doc._get_changed_fields(), ['string_field']) self.assertEquals(doc._delta(), ({'string_field': 'hello'}, {})) doc._changed_fields = [] doc.int_field = 1 self.assertEquals(doc._get_changed_fields(), ['int_field']) self.assertEquals(doc._delta(), ({'int_field': 1}, {})) doc._changed_fields = [] dict_value = {'hello': 'world', 'ping': 'pong'} doc.dict_field = dict_value self.assertEquals(doc._get_changed_fields(), ['dict_field']) self.assertEquals(doc._delta(), ({'dict_field': dict_value}, {})) doc._changed_fields = [] list_value = ['1', 2, {'hello': 'world'}] doc.list_field = list_value self.assertEquals(doc._get_changed_fields(), ['list_field']) self.assertEquals(doc._delta(), ({'list_field': list_value}, {})) # Test unsetting doc._changed_fields = [] doc.dict_field = {} self.assertEquals(doc._get_changed_fields(), ['dict_field']) self.assertEquals(doc._delta(), ({}, {'dict_field': 1})) doc._changed_fields = [] doc.list_field = [] self.assertEquals(doc._get_changed_fields(), ['list_field']) self.assertEquals(doc._delta(), ({}, {'list_field': 1})) def test_delta_recursive(self): class Embedded(EmbeddedDocument): string_field = StringField() int_field = IntField() dict_field = DictField() list_field = ListField() class Doc(Document): string_field = StringField() int_field = IntField() dict_field = DictField() list_field = ListField() embedded_field = EmbeddedDocumentField(Embedded) Doc.drop_collection() doc = Doc() doc.save() doc = Doc.objects.first() self.assertEquals(doc._get_changed_fields(), []) self.assertEquals(doc._delta(), ({}, {})) embedded_1 = Embedded() embedded_1.string_field = 'hello' embedded_1.int_field = 1 embedded_1.dict_field = {'hello': 'world'} embedded_1.list_field = ['1', 2, {'hello': 'world'}] doc.embedded_field = embedded_1 self.assertEquals(doc._get_changed_fields(), ['embedded_field']) embedded_delta = { '_types': ['Embedded'], '_cls': 'Embedded', 'string_field': 'hello', 'int_field': 1, 'dict_field': {'hello': 'world'}, 'list_field': ['1', 2, {'hello': 'world'}] } self.assertEquals(doc.embedded_field._delta(), (embedded_delta, {})) self.assertEquals(doc._delta(), ({'embedded_field': embedded_delta}, {})) doc.save() doc.reload() doc.embedded_field.dict_field = {} self.assertEquals(doc._get_changed_fields(), ['embedded_field.dict_field']) self.assertEquals(doc.embedded_field._delta(), ({}, {'dict_field': 1})) self.assertEquals(doc._delta(), ({}, {'embedded_field.dict_field': 1})) doc.save() doc.reload() self.assertEquals(doc.embedded_field.dict_field, {}) doc.embedded_field.list_field = [] self.assertEquals(doc._get_changed_fields(), ['embedded_field.list_field']) self.assertEquals(doc.embedded_field._delta(), ({}, {'list_field': 1})) 
self.assertEquals(doc._delta(), ({}, {'embedded_field.list_field': 1})) doc.save() doc.reload() self.assertEquals(doc.embedded_field.list_field, []) embedded_2 = Embedded() embedded_2.string_field = 'hello' embedded_2.int_field = 1 embedded_2.dict_field = {'hello': 'world'} embedded_2.list_field = ['1', 2, {'hello': 'world'}] doc.embedded_field.list_field = ['1', 2, embedded_2] self.assertEquals(doc._get_changed_fields(), ['embedded_field.list_field']) self.assertEquals(doc.embedded_field._delta(), ({ 'list_field': ['1', 2, { '_cls': 'Embedded', '_types': ['Embedded'], 'string_field': 'hello', 'dict_field': {'hello': 'world'}, 'int_field': 1, 'list_field': ['1', 2, {'hello': 'world'}], }] }, {})) self.assertEquals(doc._delta(), ({ 'embedded_field.list_field': ['1', 2, { '_cls': 'Embedded', '_types': ['Embedded'], 'string_field': 'hello', 'dict_field': {'hello': 'world'}, 'int_field': 1, 'list_field': ['1', 2, {'hello': 'world'}], }] }, {})) doc.save() doc.reload() self.assertEquals(doc.embedded_field.list_field[0], '1') self.assertEquals(doc.embedded_field.list_field[1], 2) for k in doc.embedded_field.list_field[2]._fields: self.assertEquals(doc.embedded_field.list_field[2][k], embedded_2[k]) doc.embedded_field.list_field[2].string_field = 'world' self.assertEquals(doc._get_changed_fields(), ['embedded_field.list_field.2.string_field']) self.assertEquals(doc.embedded_field._delta(), ({'list_field.2.string_field': 'world'}, {})) self.assertEquals(doc._delta(), ({'embedded_field.list_field.2.string_field': 'world'}, {})) doc.save() doc.reload() self.assertEquals(doc.embedded_field.list_field[2].string_field, 'world') # Test multiple assignments doc.embedded_field.list_field[2].string_field = 'hello world' doc.embedded_field.list_field[2] = doc.embedded_field.list_field[2] self.assertEquals(doc._get_changed_fields(), ['embedded_field.list_field']) self.assertEquals(doc.embedded_field._delta(), ({ 'list_field': ['1', 2, { '_types': ['Embedded'], '_cls': 'Embedded', 'string_field': 'hello world', 'int_field': 1, 'list_field': ['1', 2, {'hello': 'world'}], 'dict_field': {'hello': 'world'}}]}, {})) self.assertEquals(doc._delta(), ({ 'embedded_field.list_field': ['1', 2, { '_types': ['Embedded'], '_cls': 'Embedded', 'string_field': 'hello world', 'int_field': 1, 'list_field': ['1', 2, {'hello': 'world'}], 'dict_field': {'hello': 'world'}} ]}, {})) doc.save() doc.reload() self.assertEquals(doc.embedded_field.list_field[2].string_field, 'hello world') # Test list native methods doc.embedded_field.list_field[2].list_field.pop(0) self.assertEquals(doc._delta(), ({'embedded_field.list_field.2.list_field': [2, {'hello': 'world'}]}, {})) doc.save() doc.reload() doc.embedded_field.list_field[2].list_field.append(1) self.assertEquals(doc._delta(), ({'embedded_field.list_field.2.list_field': [2, {'hello': 'world'}, 1]}, {})) doc.save() doc.reload() self.assertEquals(doc.embedded_field.list_field[2].list_field, [2, {'hello': 'world'}, 1]) doc.embedded_field.list_field[2].list_field.sort() doc.save() doc.reload() self.assertEquals(doc.embedded_field.list_field[2].list_field, [1, 2, {'hello': 'world'}]) del(doc.embedded_field.list_field[2].list_field[2]['hello']) self.assertEquals(doc._delta(), ({'embedded_field.list_field.2.list_field': [1, 2, {}]}, {})) doc.save() doc.reload() del(doc.embedded_field.list_field[2].list_field) self.assertEquals(doc._delta(), ({}, {'embedded_field.list_field.2.list_field': 1})) doc.save() doc.reload() doc.dict_field['Embedded'] = embedded_1 doc.save() doc.reload() 
doc.dict_field['Embedded'].string_field = 'Hello World' self.assertEquals(doc._get_changed_fields(), ['dict_field.Embedded.string_field']) self.assertEquals(doc._delta(), ({'dict_field.Embedded.string_field': 'Hello World'}, {})) def test_delta_db_field(self): class Doc(Document): string_field = StringField(db_field='db_string_field') int_field = IntField(db_field='db_int_field') dict_field = DictField(db_field='db_dict_field') list_field = ListField(db_field='db_list_field') Doc.drop_collection() doc = Doc() doc.save() doc = Doc.objects.first() self.assertEquals(doc._get_changed_fields(), []) self.assertEquals(doc._delta(), ({}, {})) doc.string_field = 'hello' self.assertEquals(doc._get_changed_fields(), ['db_string_field']) self.assertEquals(doc._delta(), ({'db_string_field': 'hello'}, {})) doc._changed_fields = [] doc.int_field = 1 self.assertEquals(doc._get_changed_fields(), ['db_int_field']) self.assertEquals(doc._delta(), ({'db_int_field': 1}, {})) doc._changed_fields = [] dict_value = {'hello': 'world', 'ping': 'pong'} doc.dict_field = dict_value self.assertEquals(doc._get_changed_fields(), ['db_dict_field']) self.assertEquals(doc._delta(), ({'db_dict_field': dict_value}, {})) doc._changed_fields = [] list_value = ['1', 2, {'hello': 'world'}] doc.list_field = list_value self.assertEquals(doc._get_changed_fields(), ['db_list_field']) self.assertEquals(doc._delta(), ({'db_list_field': list_value}, {})) # Test unsetting doc._changed_fields = [] doc.dict_field = {} self.assertEquals(doc._get_changed_fields(), ['db_dict_field']) self.assertEquals(doc._delta(), ({}, {'db_dict_field': 1})) doc._changed_fields = [] doc.list_field = [] self.assertEquals(doc._get_changed_fields(), ['db_list_field']) self.assertEquals(doc._delta(), ({}, {'db_list_field': 1})) # Test it saves that data doc = Doc() doc.save() doc.string_field = 'hello' doc.int_field = 1 doc.dict_field = {'hello': 'world'} doc.list_field = ['1', 2, {'hello': 'world'}] doc.save() doc.reload() self.assertEquals(doc.string_field, 'hello') self.assertEquals(doc.int_field, 1) self.assertEquals(doc.dict_field, {'hello': 'world'}) self.assertEquals(doc.list_field, ['1', 2, {'hello': 'world'}]) def test_delta_recursive_db_field(self): class Embedded(EmbeddedDocument): string_field = StringField(db_field='db_string_field') int_field = IntField(db_field='db_int_field') dict_field = DictField(db_field='db_dict_field') list_field = ListField(db_field='db_list_field') class Doc(Document): string_field = StringField(db_field='db_string_field') int_field = IntField(db_field='db_int_field') dict_field = DictField(db_field='db_dict_field') list_field = ListField(db_field='db_list_field') embedded_field = EmbeddedDocumentField(Embedded, db_field='db_embedded_field') Doc.drop_collection() doc = Doc() doc.save() doc = Doc.objects.first() self.assertEquals(doc._get_changed_fields(), []) self.assertEquals(doc._delta(), ({}, {})) embedded_1 = Embedded() embedded_1.string_field = 'hello' embedded_1.int_field = 1 embedded_1.dict_field = {'hello': 'world'} embedded_1.list_field = ['1', 2, {'hello': 'world'}] doc.embedded_field = embedded_1 self.assertEquals(doc._get_changed_fields(), ['db_embedded_field']) embedded_delta = { '_types': ['Embedded'], '_cls': 'Embedded', 'db_string_field': 'hello', 'db_int_field': 1, 'db_dict_field': {'hello': 'world'}, 'db_list_field': ['1', 2, {'hello': 'world'}] } self.assertEquals(doc.embedded_field._delta(), (embedded_delta, {})) self.assertEquals(doc._delta(), ({'db_embedded_field': embedded_delta}, {})) doc.save() 
doc.reload() doc.embedded_field.dict_field = {} self.assertEquals(doc._get_changed_fields(), ['db_embedded_field.db_dict_field']) self.assertEquals(doc.embedded_field._delta(), ({}, {'db_dict_field': 1})) self.assertEquals(doc._delta(), ({}, {'db_embedded_field.db_dict_field': 1})) doc.save() doc.reload() self.assertEquals(doc.embedded_field.dict_field, {}) doc.embedded_field.list_field = [] self.assertEquals(doc._get_changed_fields(), ['db_embedded_field.db_list_field']) self.assertEquals(doc.embedded_field._delta(), ({}, {'db_list_field': 1})) self.assertEquals(doc._delta(), ({}, {'db_embedded_field.db_list_field': 1})) doc.save() doc.reload() self.assertEquals(doc.embedded_field.list_field, []) embedded_2 = Embedded() embedded_2.string_field = 'hello' embedded_2.int_field = 1 embedded_2.dict_field = {'hello': 'world'} embedded_2.list_field = ['1', 2, {'hello': 'world'}] doc.embedded_field.list_field = ['1', 2, embedded_2] self.assertEquals(doc._get_changed_fields(), ['db_embedded_field.db_list_field']) self.assertEquals(doc.embedded_field._delta(), ({ 'db_list_field': ['1', 2, { '_cls': 'Embedded', '_types': ['Embedded'], 'db_string_field': 'hello', 'db_dict_field': {'hello': 'world'}, 'db_int_field': 1, 'db_list_field': ['1', 2, {'hello': 'world'}], }] }, {})) self.assertEquals(doc._delta(), ({ 'db_embedded_field.db_list_field': ['1', 2, { '_cls': 'Embedded', '_types': ['Embedded'], 'db_string_field': 'hello', 'db_dict_field': {'hello': 'world'}, 'db_int_field': 1, 'db_list_field': ['1', 2, {'hello': 'world'}], }] }, {})) doc.save() doc.reload() self.assertEquals(doc.embedded_field.list_field[0], '1') self.assertEquals(doc.embedded_field.list_field[1], 2) for k in doc.embedded_field.list_field[2]._fields: self.assertEquals(doc.embedded_field.list_field[2][k], embedded_2[k]) doc.embedded_field.list_field[2].string_field = 'world' self.assertEquals(doc._get_changed_fields(), ['db_embedded_field.db_list_field.2.db_string_field']) self.assertEquals(doc.embedded_field._delta(), ({'db_list_field.2.db_string_field': 'world'}, {})) self.assertEquals(doc._delta(), ({'db_embedded_field.db_list_field.2.db_string_field': 'world'}, {})) doc.save() doc.reload() self.assertEquals(doc.embedded_field.list_field[2].string_field, 'world') # Test multiple assignments doc.embedded_field.list_field[2].string_field = 'hello world' doc.embedded_field.list_field[2] = doc.embedded_field.list_field[2] self.assertEquals(doc._get_changed_fields(), ['db_embedded_field.db_list_field']) self.assertEquals(doc.embedded_field._delta(), ({ 'db_list_field': ['1', 2, { '_types': ['Embedded'], '_cls': 'Embedded', 'db_string_field': 'hello world', 'db_int_field': 1, 'db_list_field': ['1', 2, {'hello': 'world'}], 'db_dict_field': {'hello': 'world'}}]}, {})) self.assertEquals(doc._delta(), ({ 'db_embedded_field.db_list_field': ['1', 2, { '_types': ['Embedded'], '_cls': 'Embedded', 'db_string_field': 'hello world', 'db_int_field': 1, 'db_list_field': ['1', 2, {'hello': 'world'}], 'db_dict_field': {'hello': 'world'}} ]}, {})) doc.save() doc.reload() self.assertEquals(doc.embedded_field.list_field[2].string_field, 'hello world') # Test list native methods doc.embedded_field.list_field[2].list_field.pop(0) self.assertEquals(doc._delta(), ({'db_embedded_field.db_list_field.2.db_list_field': [2, {'hello': 'world'}]}, {})) doc.save() doc.reload() doc.embedded_field.list_field[2].list_field.append(1) self.assertEquals(doc._delta(), ({'db_embedded_field.db_list_field.2.db_list_field': [2, {'hello': 'world'}, 1]}, {})) doc.save() 
doc.reload() self.assertEquals(doc.embedded_field.list_field[2].list_field, [2, {'hello': 'world'}, 1]) doc.embedded_field.list_field[2].list_field.sort() doc.save() doc.reload() self.assertEquals(doc.embedded_field.list_field[2].list_field, [1, 2, {'hello': 'world'}]) del(doc.embedded_field.list_field[2].list_field[2]['hello']) self.assertEquals(doc._delta(), ({'db_embedded_field.db_list_field.2.db_list_field': [1, 2, {}]}, {})) doc.save() doc.reload() del(doc.embedded_field.list_field[2].list_field) self.assertEquals(doc._delta(), ({}, {'db_embedded_field.db_list_field.2.db_list_field': 1})) def test_save_only_changed_fields(self): """Ensure save only sets / unsets changed fields """ class User(self.Person): active = BooleanField(default=True) User.drop_collection() # Create person object and save it to the database user = User(name='Test User', age=30, active=True) user.save() user.reload() # Simulated Race condition same_person = self.Person.objects.get() same_person.active = False user.age = 21 user.save() same_person.name = 'User' same_person.save() person = self.Person.objects.get() self.assertEquals(person.name, 'User') self.assertEquals(person.age, 21) self.assertEquals(person.active, False) def test_save_only_changed_fields_recursive(self): """Ensure save only sets / unsets changed fields """ class Comment(EmbeddedDocument): published = BooleanField(default=True) class User(self.Person): comments_dict = DictField() comments = ListField(EmbeddedDocumentField(Comment)) active = BooleanField(default=True) User.drop_collection() # Create person object and save it to the database person = User(name='Test User', age=30, active=True) person.comments.append(Comment()) person.save() person.reload() person = self.Person.objects.get() self.assertTrue(person.comments[0].published) person.comments[0].published = False person.save() person = self.Person.objects.get() self.assertFalse(person.comments[0].published) # Simple dict w person.comments_dict['first_post'] = Comment() person.save() person = self.Person.objects.get() self.assertTrue(person.comments_dict['first_post'].published) person.comments_dict['first_post'].published = False person.save() person = self.Person.objects.get() self.assertFalse(person.comments_dict['first_post'].published) def test_delete(self): """Ensure that document may be deleted using the delete method. """ person = self.Person(name="Test User", age=30) person.save() self.assertEqual(len(self.Person.objects), 1) person.delete() self.assertEqual(len(self.Person.objects), 0) def test_save_custom_id(self): """Ensure that a document may be saved with a custom _id. """ # Create person object and save it to the database person = self.Person(name='Test User', age=30, id='497ce96f395f2f052a494fd4') person.save() # Ensure that the object is in the database with the correct _id collection = self.db[self.Person._get_collection_name()] person_obj = collection.find_one({'name': 'Test User'}) self.assertEqual(str(person_obj['_id']), '497ce96f395f2f052a494fd4') def test_save_custom_pk(self): """Ensure that a document may be saved with a custom _id using pk alias. 
""" # Create person object and save it to the database person = self.Person(name='Test User', age=30, pk='497ce96f395f2f052a494fd4') person.save() # Ensure that the object is in the database with the correct _id collection = self.db[self.Person._get_collection_name()] person_obj = collection.find_one({'name': 'Test User'}) self.assertEqual(str(person_obj['_id']), '497ce96f395f2f052a494fd4') def test_save_list(self): """Ensure that a list field may be properly saved. """ class Comment(EmbeddedDocument): content = StringField() class BlogPost(Document): content = StringField() comments = ListField(EmbeddedDocumentField(Comment)) tags = ListField(StringField()) BlogPost.drop_collection() post = BlogPost(content='Went for a walk today...') post.tags = tags = ['fun', 'leisure'] comments = [Comment(content='Good for you'), Comment(content='Yay.')] post.comments = comments post.save() collection = self.db[BlogPost._get_collection_name()] post_obj = collection.find_one() self.assertEqual(post_obj['tags'], tags) for comment_obj, comment in zip(post_obj['comments'], comments): self.assertEqual(comment_obj['content'], comment['content']) BlogPost.drop_collection() def test_list_search_by_embedded(self): class User(Document): username = StringField(required=True) meta = {'allow_inheritance': False} class Comment(EmbeddedDocument): comment = StringField() user = ReferenceField(User, required=True) meta = {'allow_inheritance': False} class Page(Document): comments = ListField(EmbeddedDocumentField(Comment)) meta = {'allow_inheritance': False, 'indexes': [ {'fields': ['comments.user']} ]} User.drop_collection() Page.drop_collection() u1 = User(username="wilson") u1.save() u2 = User(username="rozza") u2.save() u3 = User(username="hmarr") u3.save() p1 = Page(comments = [Comment(user=u1, comment="Its very good"), Comment(user=u2, comment="Hello world"), Comment(user=u3, comment="Ping Pong"), Comment(user=u1, comment="I like a beer")]) p1.save() p2 = Page(comments = [Comment(user=u1, comment="Its very good"), Comment(user=u2, comment="Hello world")]) p2.save() p3 = Page(comments = [Comment(user=u3, comment="Its very good")]) p3.save() p4 = Page(comments = [Comment(user=u2, comment="Heavy Metal song")]) p4.save() self.assertEqual([p1, p2], list(Page.objects.filter(comments__user=u1))) self.assertEqual([p1, p2, p4], list(Page.objects.filter(comments__user=u2))) self.assertEqual([p1, p3], list(Page.objects.filter(comments__user=u3))) def test_save_embedded_document(self): """Ensure that a document with an embedded document field may be saved in the database. """ class EmployeeDetails(EmbeddedDocument): position = StringField() class Employee(self.Person): salary = IntField() details = EmbeddedDocumentField(EmployeeDetails) # Create employee object and save it to the database employee = Employee(name='Test Employee', age=50, salary=20000) employee.details = EmployeeDetails(position='Developer') employee.save() # Ensure that the object is in the database collection = self.db[self.Person._get_collection_name()] employee_obj = collection.find_one({'name': 'Test Employee'}) self.assertEqual(employee_obj['name'], 'Test Employee') self.assertEqual(employee_obj['age'], 50) # Ensure that the 'details' embedded object saved correctly self.assertEqual(employee_obj['details']['position'], 'Developer') def test_updating_an_embedded_document(self): """Ensure that a document with an embedded document field may be saved in the database. 
""" class EmployeeDetails(EmbeddedDocument): position = StringField() class Employee(self.Person): salary = IntField() details = EmbeddedDocumentField(EmployeeDetails) # Create employee object and save it to the database employee = Employee(name='Test Employee', age=50, salary=20000) employee.details = EmployeeDetails(position='Developer') employee.save() # Test updating an embedded document promoted_employee = Employee.objects.get(name='Test Employee') promoted_employee.details.position = 'Senior Developer' promoted_employee.save() promoted_employee.reload() self.assertEqual(promoted_employee.name, 'Test Employee') self.assertEqual(promoted_employee.age, 50) # Ensure that the 'details' embedded object saved correctly self.assertEqual(promoted_employee.details.position, 'Senior Developer') # Test removal promoted_employee.details = None promoted_employee.save() promoted_employee.reload() self.assertEqual(promoted_employee.details, None) def test_mixins_dont_add_to_types(self): class Bob(Document): name = StringField() Bob.drop_collection() p = Bob(name="Rozza") p.save() Bob.drop_collection() class Person(Document, Mixin): pass Person.drop_collection() p = Person(name="Rozza") p.save() self.assertEquals(p._fields.keys(), ['name', 'id']) collection = self.db[Person._get_collection_name()] obj = collection.find_one() self.assertEquals(obj['_cls'], 'Person') self.assertEquals(obj['_types'], ['Person']) self.assertEquals(Person.objects.count(), 1) rozza = Person.objects.get(name="Rozza") Person.drop_collection() def test_save_reference(self): """Ensure that a document reference field may be saved in the database. """ class BlogPost(Document): meta = {'collection': 'blogpost_1'} content = StringField() author = ReferenceField(self.Person) BlogPost.drop_collection() author = self.Person(name='Test User') author.save() post = BlogPost(content='Watched some TV today... how exciting.') # Should only reference author when saving post.author = author post.save() post_obj = BlogPost.objects.first() # Test laziness self.assertTrue(isinstance(post_obj._data['author'], pymongo.dbref.DBRef)) self.assertTrue(isinstance(post_obj.author, self.Person)) self.assertEqual(post_obj.author.name, 'Test User') # Ensure that the dereferenced object may be changed and saved post_obj.author.age = 25 post_obj.author.save() author = list(self.Person.objects(name='Test User'))[-1] self.assertEqual(author.age, 25) BlogPost.drop_collection() def test_reverse_delete_rule_cascade_and_nullify(self): """Ensure that a referenced document is also deleted upon deletion. """ class BlogPost(Document): content = StringField() author = ReferenceField(self.Person, reverse_delete_rule=CASCADE) reviewer = ReferenceField(self.Person, reverse_delete_rule=NULLIFY) self.Person.drop_collection() BlogPost.drop_collection() author = self.Person(name='Test User') author.save() reviewer = self.Person(name='Re Viewer') reviewer.save() post = BlogPost(content = 'Watched some TV') post.author = author post.reviewer = reviewer post.save() reviewer.delete() self.assertEqual(len(BlogPost.objects), 1) # No effect on the BlogPost self.assertEqual(BlogPost.objects.get().reviewer, None) # Delete the Person, which should lead to deletion of the BlogPost, too author.delete() self.assertEqual(len(BlogPost.objects), 0) def test_reverse_delete_rule_cascade_recurs(self): """Ensure that a chain of documents is also deleted upon cascaded deletion. 
""" class BlogPost(Document): content = StringField() author = ReferenceField(self.Person, reverse_delete_rule=CASCADE) class Comment(Document): text = StringField() post = ReferenceField(BlogPost, reverse_delete_rule=CASCADE) self.Person.drop_collection() BlogPost.drop_collection() Comment.drop_collection() author = self.Person(name='Test User') author.save() post = BlogPost(content = 'Watched some TV') post.author = author post.save() comment = Comment(text = 'Kudos.') comment.post = post comment.save() # Delete the Person, which should lead to deletion of the BlogPost, and, # recursively to the Comment, too author.delete() self.assertEqual(len(Comment.objects), 0) self.Person.drop_collection() BlogPost.drop_collection() Comment.drop_collection() def test_reverse_delete_rule_deny(self): """Ensure that a document cannot be referenced if there are still documents referring to it. """ class BlogPost(Document): content = StringField() author = ReferenceField(self.Person, reverse_delete_rule=DENY) self.Person.drop_collection() BlogPost.drop_collection() author = self.Person(name='Test User') author.save() post = BlogPost(content = 'Watched some TV') post.author = author post.save() # Delete the Person should be denied self.assertRaises(OperationError, author.delete) # Should raise denied error self.assertEqual(len(BlogPost.objects), 1) # No objects may have been deleted self.assertEqual(len(self.Person.objects), 1) # Other users, that don't have BlogPosts must be removable, like normal author = self.Person(name='Another User') author.save() self.assertEqual(len(self.Person.objects), 2) author.delete() self.assertEqual(len(self.Person.objects), 1) self.Person.drop_collection() BlogPost.drop_collection() def subclasses_and_unique_keys_works(self): class A(Document): pass class B(A): foo = BooleanField(unique=True) A.drop_collection() B.drop_collection() A().save() A().save() B(foo=True).save() self.assertEquals(A.objects.count(), 2) self.assertEquals(B.objects.count(), 1) A.drop_collection() B.drop_collection() def test_document_hash(self): """Test document in list, dict, set """ class User(Document): pass class BlogPost(Document): pass # Clear old datas User.drop_collection() BlogPost.drop_collection() u1 = User.objects.create() u2 = User.objects.create() u3 = User.objects.create() u4 = User() # New object b1 = BlogPost.objects.create() b2 = BlogPost.objects.create() # in List all_user_list = list(User.objects.all()) self.assertTrue(u1 in all_user_list) self.assertTrue(u2 in all_user_list) self.assertTrue(u3 in all_user_list) self.assertFalse(u4 in all_user_list) # New object self.assertFalse(b1 in all_user_list) # Other object self.assertFalse(b2 in all_user_list) # Other object # in Dict all_user_dic = {} for u in User.objects.all(): all_user_dic[u] = "OK" self.assertEqual(all_user_dic.get(u1, False), "OK" ) self.assertEqual(all_user_dic.get(u2, False), "OK" ) self.assertEqual(all_user_dic.get(u3, False), "OK" ) self.assertEqual(all_user_dic.get(u4, False), False ) # New object self.assertEqual(all_user_dic.get(b1, False), False ) # Other object self.assertEqual(all_user_dic.get(b2, False), False ) # Other object # in Set all_user_set = set(User.objects.all()) self.assertTrue(u1 in all_user_set ) def test_picklable(self): pickle_doc = PickleTest(number=1, string="One", lists=['1', '2']) pickle_doc.embedded = PickleEmbedded() pickle_doc.save() pickled_doc = pickle.dumps(pickle_doc) resurrected = pickle.loads(pickled_doc) self.assertEquals(resurrected, pickle_doc) resurrected.string = "Two" 
        resurrected.save()
        pickle_doc.reload()

        self.assertEquals(resurrected, pickle_doc)

    def throw_invalid_document_error(self):
        # test handles people trying to upsert
        def throw_invalid_document_error():
            class Blog(Document):
                validate = DictField()

        self.assertRaises(InvalidDocumentError, throw_invalid_document_error)


if __name__ == '__main__':
    unittest.main()
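# --- Illustrative sketch added by the editor; not part of the original test
# module. Several tests above assert that inheritable Document classes store
# '_cls' and '_types' markers in MongoDB, while classes declared with
# allow_inheritance=False do not. A minimal, hypothetical demonstration of
# that behaviour (assumes a local mongod and an arbitrary database name
# 'inheritance_demo'):
#
#     from mongoengine import Document, StringField, connect
#
#     connect('inheritance_demo')
#
#     class Animal(Document):                 # inheritance allowed (default)
#         name = StringField()
#
#     class Plant(Document):
#         name = StringField()
#         meta = {'allow_inheritance': False}
#
#     Animal(name='dog').save()
#     Plant(name='fern').save()
#
#     raw_animal = Animal.objects._collection.find_one()
#     raw_plant = Plant.objects._collection.find_one()
#     assert '_cls' in raw_animal and '_types' in raw_animal
#     assert '_cls' not in raw_plant and '_types' not in raw_plant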
KarimAllah/mongoengine
tests/document.py
Python
mit
79,347
[ "exciting" ]
75c36b5807e1487e28cc78ea7865cd8591a2576445fbcf076085639803b5b9e1
#!/usr/bin/python import getopt import sys from Bio import SeqIO import time import os import shutil import pandas __author__ = "Andriy Sheremet" #Helper functions definitions def parse_contigs_ind(f_name): """ Returns sequences index from the input files(s) remember to close index object after use """ handle = open(f_name, "rU") record_dict = SeqIO.index(f_name,"fasta") handle.close() return record_dict #returning specific sequences and overal list def retrive_sequence(contig_lst, rec_dic): """ Returns list of sequence elements from dictionary/index of SeqIO objects specific to the contig_lst parameter """ contig_seqs = list() #record_dict = rec_dic #handle.close() for contig in contig_lst: contig_seqs.append(str(rec_dic[contig].seq))#fixing BiopythonDeprecationWarning return contig_seqs def filter_seq_dict(key_lst, rec_dic): """ Returns filtered dictionary element from rec_dic according to sequence names passed in key_lst """ return { key: rec_dic[key] for key in key_lst } def unique_scaffold_topEval(dataframe): #returns pandas series object variables = list(dataframe.columns.values) scaffolds=dict() rows=list() for row in dataframe.itertuples(): #if row[1]=='Ga0073928_10002560': if row[1] not in scaffolds: scaffolds[row[1]]=row else: if row[11]<scaffolds[row[1]][11]: scaffolds[row[1]]=row rows=scaffolds.values() #variables=['quid', 'suid', 'iden', 'alen', 'mism', 'gapo', 'qsta', 'qend', 'ssta', 'send', 'eval', 'bits'] df = pandas.DataFrame([[getattr(i,j) for j in variables] for i in rows], columns = variables) return df def close_ind_lst(ind_lst): """ Closes index objects supplied in input parameter list """ for index in ind_lst: index.close() def usage(): print "\nThis is the usage function\n" # print 'Usage: '+sys.argv[0]+' -i <input_file> [-o <output>] [-l <minimum length>]' # print 'Example: '+sys.argv[0]+' -i input.fasta -o output.fasta -l 100' def main(argv): #default parameters mg_lst = [] ref_lst = [] e_val = 1e-5 alen = 50.0 alen_percent = True alen_bp = False iden = 95.0 name= "output" fmt_lst = ["fasta"] supported_formats =["fasta", "csv"] iterations = 1 alen_increment = 5.0 iden_increment = 0.0 blast_db_Dir = "" results_Dir = "" input_files_Dir = "" ref_out_0 = "" blasted_lst = [] continue_from_previous = False #poorly supported, just keeping the directories skip_blasting = False debugging = False try: opts, args = getopt.getopt(argv, "r:m:n:e:a:i:f:h", ["reference=", "metagenome=", "name=", "e_value=", "alignment_length=", "identity=","format=", "iterations=", "alen_increment=", "iden_increment=","continue_from_previous","skip_blasting","debugging", "help"]) except getopt.GetoptError: usage() sys.exit(2) for opt, arg in opts: if opt in ("-h", "--help"): usage() sys.exit() # elif opt in ("--recover_after_failure"): # recover_after_failure = True # print "Recover after failure:", recover_after_failure elif opt in ("--skip_blasting"): skip_blasting = True if debugging: print "Blasting step omitted; Using previous blast output." 
elif opt in ("--continue_from_previous"): continue_from_previous = True if debugging: print "Continue after failure:", continue_from_previous elif opt in ("--debugging"): debugging = True if debugging: print "Debugging messages:", debugging elif opt in ("-r", "--reference"): if arg: ref_lst=arg.split(',') #infiles = arg if debugging: print "Reference file(s)", ref_lst elif opt in ("-m", "--metagenome"): if arg: mg_lst=arg.split(',') #infiles = arg if debugging: print "Metagenome file(s)", mg_lst elif opt in ("-f", "--format"): if arg: fmt_lst=arg.split(',') #infiles = arg if debugging: print "Output format(s)", fmt_lst elif opt in ("-n", "--name"): if arg.strip(): name = arg if debugging: print "Project name", name elif opt in ("-e", "--e_value"): try: e_val = float(arg) except: print "\nERROR: Please enter numerical value as -e parameter (using default: 1e-5)" usage() sys.exit(1) if debugging: print "E value", e_val elif opt in ("-a", "--alignment_length"): if arg.strip()[-1]=="%": alen_bp = False alen_percent = True else: alen_bp = True alen_percent = False try: alen = float(arg.split("%")[0]) except: print "\nERROR: Please enter an numerical value as -alen parameter (using default: 50.0)" usage() sys.exit(1) if debugging: print "Alignment length", alen elif opt in ("-i", "--identity"): try: iden = float(arg) except: print "\nERROR: Please enter an numerical value as -iden parameter (using default: 95.0)" usage() sys.exit(1) if debugging: print "Alignment length", iden elif opt in ("--iterations"): try: iterations = int(arg) except: print "\nWARNING: Please enter integer value as --iterations parameter (using default: 1)" if debugging: print "Iterations: ", iterations elif opt in ("--alen_increment"): try: alen_increment = float(arg) except: print "\nWARNING: Please enter numerical value as --alen_increment parameter (using default: )", alen_increment if debugging: print "Alignment length increment: ", alen_increment elif opt in ("--iden_increment"): try: iden_increment = float(arg) except: print "\nWARNING: Please enter numerical value as --iden_increment parameter (using default: )", iden_increment if debugging: print "Alignment length increment: ", iden_increment for ref_file in [x for x in ref_lst if x]: try: # with open(ref_file, "rU") as hand_ref: pass except: print "\nERROR: Reference File(s) ["+ref_file+"] doesn't exist" usage() sys.exit(1) for mg_file in [x for x in mg_lst if x]: try: # with open(mg_file, "rU") as hand_mg: pass except: print "\nERROR: Metagenome File(s) ["+mg_file+"] doesn't exist" usage() sys.exit(1) for fmt in [x for x in fmt_lst if x]: if fmt not in supported_formats: print "\nWARNING: Output format [",fmt,"] is not supported" print "\tUse -h(--help) option for the list of supported formats" fmt_lst=["fasta"] print "\tUsing default output format: ", fmt_lst[0] project_dir = name if not continue_from_previous: if os.path.exists(project_dir): shutil.rmtree(project_dir) try: os.mkdir(project_dir) except OSError: print "ERROR: Cannot create project directory: " + name raise print "\n\t Initial Parameters:" print "\nProject Name: ", name,'\n' print "Project Directory: ", os.path.abspath(name),'\n' print "Reference File(s): ", ref_lst,'\n' print "Metagenome File(s): ", mg_lst,'\n' print "E Value: ", e_val, "\n" if alen_percent: print "Alignment Length: "+str(alen)+'%\n' if alen_bp: print "Alignment Length: "+str(alen)+'bp\n' print "Sequence Identity: "+str(iden)+'%\n' print "Output Format(s):", fmt_lst,'\n' if iterations > 1: print "Iterations: ", iterations, '\n' 
print "Alignment Length Increment: ", alen_increment, '\n' print "Sequence identity Increment: ", iden_increment, '\n' #Initializing directories blast_db_Dir = name+"/blast_db" if not continue_from_previous: if os.path.exists(blast_db_Dir): shutil.rmtree(blast_db_Dir) try: os.mkdir(blast_db_Dir) except OSError: print "ERROR: Cannot create project directory: " + blast_db_Dir raise results_Dir = name+"/results" if not continue_from_previous: if os.path.exists(results_Dir): shutil.rmtree(results_Dir) try: os.mkdir(results_Dir) except OSError: print "ERROR: Cannot create project directory: " + results_Dir raise input_files_Dir = name+"/input_files" if not continue_from_previous: if os.path.exists(input_files_Dir): shutil.rmtree(input_files_Dir) try: os.mkdir(input_files_Dir) except OSError: print "ERROR: Cannot create project directory: " + input_files_Dir raise # Writing raw reference files into a specific input filename input_ref_records = {} for reference in ref_lst: ref_records_ind = parse_contigs_ind(reference) #ref_records = dict(ref_records_ind) input_ref_records.update(ref_records_ind) ref_records_ind.close() #input_ref_records.update(ref_records) ref_out_0 = input_files_Dir+"/reference0.fna" with open(ref_out_0, "w") as handle: SeqIO.write(input_ref_records.values(), handle, "fasta") #NO NEED TO CLOSE with statement will automatically close the file # Making BLAST databases #output fname from before used as input for blast database creation input_ref_0 = ref_out_0 title_db = name+"_db"#add iteration functionality outfile_db = blast_db_Dir+"/iteration"+str(iterations)+"/"+name+"_db"#change into for loop os.system("makeblastdb -in "+input_ref_0+" -dbtype nucl -title "+title_db+" -out "+outfile_db+" -parse_seqids") # BLASTing query contigs if not skip_blasting: print "\nBLASTing query file(s):" for i in range(len(mg_lst)): database = outfile_db # adjust for iterations blasted_lst.append(results_Dir+"/recruited_mg_"+str(i)+".tab") start = time.time() os_string = 'blastn -db '+database+' -query \"'+mg_lst[i]+'\" -out '+blasted_lst[i]+" -evalue "+str(e_val)+" -outfmt 6 -num_threads 8" #print os_string os.system(os_string) print "\t"+mg_lst[i]+"; Time elapsed: "+str(time.time()-start)+" seconds." else: for i in range(len(mg_lst)): blasted_lst.append(results_Dir+"/recruited_mg_"+str(i)+".tab") # Parsing BLAST outputs blast_cols = ['quid', 'suid', 'iden', 'alen', 'mism', 'gapo', 'qsta', 'qend', 'ssta', 'send', 'eval', 'bits'] recruited_mg=[] for i in range(len(mg_lst)): df = pandas.read_csv(blasted_lst[i] ,sep="\t", header=None) df.columns=blast_cols recruited_mg.append(df) # print len(recruited_mg[0]) # print len(recruited_mg[1]) #creating all_records entry #! Remember to close index objects after they are no longer needed #! Use helper function close_ind_lst() all_records = [] print "\nIndexing metagenome file(s):" for i in range(len(mg_lst)): start = time.time() all_records.append(parse_contigs_ind(mg_lst[i])) print "\t"+mg_lst[i]+" Indexed in : "+str(time.time()-start)+" seconds." 
# Transforming data for i in range(len(mg_lst)): #cutoff_contigs[dataframe]=evalue_filter(cutoff_contigs[dataframe]) recruited_mg[i]=unique_scaffold_topEval(recruited_mg[i]) contig_list = recruited_mg[i]['quid'].tolist() recruited_mg[i]['Seq_nt']=retrive_sequence(contig_list, all_records[i]) recruited_mg[i]['Seq_size']=recruited_mg[i]['Seq_nt'].apply(lambda x: len(x)) recruited_mg[i]['Coverage']=recruited_mg[i]['alen'].apply(lambda x: 100.0*float(x))/recruited_mg[i]['Seq_size'] recruited_mg[i]['Metric']=recruited_mg[i]['Coverage']*recruited_mg[i]['iden']/100.0 recruited_mg[i] = recruited_mg[i][['quid', 'suid', 'iden', 'alen','Coverage','Metric', 'mism', 'gapo', 'qsta', 'qend', 'ssta', 'send', 'eval', 'bits','Seq_size', 'Seq_nt']] # Here would go statistics functions and producing plots # # # # # # Quality filtering before outputting if alen_percent: for i in range(len(recruited_mg)): recruited_mg[i]=recruited_mg[i][(recruited_mg[i]['iden']>=iden)&(recruited_mg[i]['Coverage']>=alen)&(recruited_mg[i]['eval']<=e_val)] if alen_bp: for i in range(len(recruited_mg)): recruited_mg[i]=recruited_mg[i][(recruited_mg[i]['iden']>=iden)&(recruited_mg[i]['alen']>=alen)&(recruited_mg[i]['eval']<=e_val)] # print len(recruited_mg[0]) # print len(recruited_mg[1]) # Batch export to outfmt (csv and/or multiple FASTA) alen_str = "" iden_str = "_iden_"+str(iden)+"%" if alen_percent: alen_str = "_alen_"+str(alen)+"%" if alen_bp: alen_str = "_alen_"+str(alen)+"bp" if iterations > 1: prefix=results_Dir+"/"+name+"_iter_e_"+str(e_val)+iden_str+alen_str+"_recruited_mg_" else: prefix=results_Dir+"/"+name+"_e_"+str(e_val)+iden_str+alen_str+"_recruited_mg_" print "\nWriting files:" for i in range(len(mg_lst)): records= [] # try: # os.remove(outfile1) # except OSError: # pass if "csv" in fmt_lst: outfile1 = prefix+str(i)+".csv" recruited_mg[i].to_csv(outfile1, sep='\t') print str(len(recruited_mg[i]))+" sequences written to "+outfile1 if "fasta" in fmt_lst: ids = recruited_mg[i]['quid'].tolist() #if len(ids)==len(sequences): for j in range(len(ids)): records.append(all_records[i][ids[j]]) outfile2 = prefix+str(i)+".fasta" with open(outfile2, "w") as output_handle: SeqIO.write(records, output_handle, "fasta") print str(len(ids))+" sequences written to "+outfile2 close_ind_lst(all_records) #all_records[i].close()# keep open if multiple iterations #recruited_mg_1 = pandas.read_csv(out_name1 ,sep="\t", header=None) #recruited_mg_1.columns=['quid', 'suid', 'iden', 'alen', 'mism', 'gapo', 'qsta', 'qend', 'ssta', 'send', 'eval', 'bits'] #recruited_mg_2 = pandas.read_csv(out_name2 ,sep="\t", header=None) #recruited_mg_2.columns=['quid', 'suid', 'iden', 'alen', 'mism', 'gapo', 'qsta', 'qend', 'ssta', 'send', 'eval', 'bits'] #recruited_mg = [recruited_mg_1, recruited_mg_2] # blast_db_Dir = "" # results_Dir = "" # input_files_Dir = "" # parsed = SeqIO.parse(handle, "fasta") # # records = list() # # # total = 0 # processed = 0 # for record in parsed: # total += 1 # #print(record.id), len(record.seq) # if len(record.seq) >= length: # processed += 1 # records.append(record) # handle.close() # # print "%d sequences found"%(total) # # try: # output_handle = open(outfile, "w") # SeqIO.write(records, output_handle, "fasta") # output_handle.close() # print "%d sequences written"%(processed) # except: # print "ERROR: Illegal output filename" # sys.exit(1) if __name__ == "__main__": main(sys.argv[1:])
nyirock/mg_blast_wrapper
mg_blast_wrapper_v1.8.py
Python
mit
16,671
[ "BLAST" ]
10d056a46582af7e2add3ef28ba90d5b3019917055f42d69aff348989cd113fa
# -*- coding: utf-8 -*-
"""
Fitting a 2D Gaussian.

@author: Sebastian M. Gaebel
@email: sebastian.gaebel@ligo.org
"""

import matplotlib.pyplot as plt
import numpy as np
import gaps

# Print the available platform and devices
gaps.print_devices()

# Define a random 2D gaussian
x_mean, y_mean = np.random.uniform(-5, 5, 2)
x_sigma, y_sigma = 10**np.random.uniform(-1, 0.5, 2)
# Generate some samples
samples = np.random.normal((x_mean, y_mean), (x_sigma, y_sigma), size=(240, 2))

# Define the logP function in OpenCL code.
# cfloat is the medium precision float type of GAPS.
kernel_src = """
#define N_POINTS {}
""".format(samples.shape[0]) + """
cfloat logP_fn(const cfloat point[N_DIM],
               __global const cdouble samples[N_POINTS][2]) {
    const cfloat x_mean = point[0];
    const cfloat y_mean = point[1];
    const cfloat x_log_sigma = point[2];
    const cfloat y_log_sigma = point[3];
    if((x_mean < -10) || (x_mean > 10) ||
       (y_mean < -10) || (y_mean > 10) ||
       (x_log_sigma < -3) || (x_log_sigma > 3) ||
       (y_log_sigma < -3) || (y_log_sigma > 3)) {
        return -INFINITY;
    }
    cdouble accumulator = 0;
    for(size_t i = 0; i < N_POINTS; i++) {
        accumulator += log_gaussian(samples[i][0], x_mean, exp(x_log_sigma));
        accumulator += log_gaussian(samples[i][1], y_mean, exp(y_log_sigma));
    }
    return accumulator;
}
"""

initial_state = np.random.uniform((-10, -10, -3, -3), (10, 10, 3, 3),
                                  size=(4096, 4))

blob = gaps.run_sampler(kernel_src, initial_state, n_steps=2400, n_walkers=4096,
                        keys=['x_mean', 'y_mean', 'x_log_sigma', 'y_log_sigma'],
                        data=samples)

opts = dict(linestyles=('dashed', 'solid', 'dashed'), colors='r', alpha=0.75)

plt.figure()
H = plt.hist(blob['x_mean'].flatten(), bins=120)
plt.vlines(x_mean, 0, np.max(H[0]), **opts)
plt.xlabel('x_mean')
plt.show()

plt.figure()
H = plt.hist(blob['y_mean'].flatten(), bins=120)
plt.vlines(y_mean, 0, np.max(H[0]), **opts)
plt.xlabel('y_mean')
plt.show()

plt.figure()
H = plt.hist(blob['x_log_sigma'].flatten(), bins=120)
plt.vlines(np.log(x_sigma), 0, np.max(H[0]), **opts)
plt.xlabel('x_log_sigma')
plt.show()

plt.figure()
H = plt.hist(blob['y_log_sigma'].flatten(), bins=120)
plt.vlines(np.log(y_sigma), 0, np.max(H[0]), **opts)
plt.xlabel('y_log_sigma')
plt.show()
sgaebel/GAPS
examples/fitting_2d_gaussian.py
Python
mit
2,427
[ "Gaussian" ]
b1edd5b3cd89f675ccd9ae55233530472a9636e172e54e1c417e748c07b79fef
""" Enterprise Recovery Email feature tests """ import uuid from regression.pages.enterprise.confirm_recovery_email import ConfirmRecoveryEmail from regression.pages.enterprise.enterprise_const import ( ENT_PORTAL_PASSWORD, ENT_PORTAL_USERNAME ) from regression.pages.whitelabel.reset_password_page import ResetPassword from regression.tests.enterprise.ent_test_base import EnterpriseTestBase from regression.tests.helpers.api_clients import GuerrillaMailApi from regression.tests.helpers.utils import get_random_password class TestEnterpriseRecoveryEmail(EnterpriseTestBase): """ Test Enterprise Recovery Email """ def setUp(self): """ Initialize all page objects """ super().setUp() self.user_name = str(uuid.uuid4().node) self.temp_mail = GuerrillaMailApi(self.user_name) self.user_email = self.temp_mail.user_email def test_enterprise_recovery_email(self): """ Scenario: A user is able to set secondary email """ self.lms_login.visit() # Enterprise portal flow self.login_to_ent_portal( ENT_PORTAL_USERNAME, ENT_PORTAL_PASSWORD) self.access_course() self.ent_edx_login.wait_for_page() # Register a new enterprise user self.register_ent_edx_user() self.ent_course_enrollment.wait_for_page() self.dashboard.visit() # There should be a message to add secondary email account. self.assertTrue(self.dashboard.is_secondary_account_message_visible( 'Add a recovery email' )) new_password = get_random_password() # # Call the fixture to unlink existing account for the user # self.addCleanup(self.unlink_account) # Add secondary email address in account settings page. self.add_recovery_email(self.user_email) # Get the secondary email activation url from the email. recovery_email_url = self.temp_mail.get_url_from_email( 'activate_secondary_email' ) self.dashboard.visit() # There should be a message to activate secondary email account. self.assertTrue(self.dashboard.is_secondary_account_message_visible( 'Recovery email is not activated yet' )) recovery_email_page = ConfirmRecoveryEmail(self.browser, recovery_email_url) recovery_email_page.visit() # Secondary Email Account has been activated. self.assertTrue(recovery_email_page.is_secondary_account_activation_complete) # Unlink existing account for the user and logout. self.unlink_account() # login and go to reset password page to reset the password. self.logout_from_lms_using_api() self.lms_login.visit() self.lms_login.send_forgot_password(self.user_email) self.assertTrue( self.lms_login.is_password_reset_email_message_visible ) # Get reset password url for the email. reset_password_url = self.temp_mail.get_url_from_email( 'password_reset_confirm' ) # Reset password and log back in. reset_password = ResetPassword(self.browser, reset_password_url) reset_password.visit() reset_password.reset_password(new_password) self.lms_login.visit() self.lms_login.provide_info(self.user_email, new_password) self.lms_login.submit() self.dashboard.wait_for_page() self.user_account.visit() self.assertEqual(self.user_email, self.user_account.get_user_email())
edx/edx-e2e-tests
regression/tests/enterprise/test_ent_recovery_email.py
Python
agpl-3.0
3,611
[ "VisIt" ]
836cfd75accd8b9acbc80a0b973b3041b52da153c8620c85ea3ebafcfa0ba454
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# This software is licensed as described in the README.rst and LICENSE files,
# which you should have received as part of this distribution.
import argparse

from raspi_sensor.main import setup_default_mqtt_args
from raspi_mc.magnetic_contact import MC


def setup_args():
    ap = argparse.ArgumentParser(
        prog='raspi-mc',
        description='RPi.MC is using magnetic contact switch (door sensor), will permanently '
                    'sense for HIGH pin state to detect door status. For more info visit: '
                    'https://github.com/ricco386/RPi')
    setup_default_mqtt_args(ap)

    return ap.parse_args()


def main():
    params = setup_args()
    name = 'Magnetic_Contact'
    config = None

    if hasattr(params, 'name') and params.name:
        name = params.name

    if hasattr(params, 'config') and params.config:
        config = params.config

    mc = MC(name=name, config_path=config)
    mc.setup_args(params)

    if hasattr(params, 'status') and params.status:
        mc.sensor_read()
        print(mc.get_door_state())
    else:
        mc.sense()


if __name__ == "__main__":
    # execute only if run as a script
    main()
ricco386/broadcaster
RPi.MC/raspi_mc/main.py
Python
bsd-3-clause
1,283
[ "VisIt" ]
160ed05a0a12a1d3ffeb0fe544ce834edde99facac610d12756833dc064f4270
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config

# -- Path setup --------------------------------------------------------------

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys

# -- Project information -----------------------------------------------------

project = 'qmflows-namd'
copyright = '2019, Felipe Zapata and Ivan Infante'
author = 'Felipe Zapata and Ivan Infante'

here = os.path.dirname(__file__)
sys.path.insert(0, os.path.abspath(os.path.join(here, '..')))

vers = {}
with open(os.path.join(here, '..', '__version__.py')) as f:
    exec(f.read(), vers)

# The short X.Y version
version = vers["__version__"]
# The full version, including alpha/beta/rc tags
release = vers["__version__"]


# -- General configuration ---------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.intersphinx',
    'sphinx.ext.todo',
    'sphinx.ext.coverage',
    'sphinx.ext.mathjax',
    'sphinx.ext.ifconfig',
    'sphinx.ext.viewcode',
    'sphinx.ext.napoleon',
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None


# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself.  Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}


# -- Options for HTMLHelp output ---------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = 'qmflows-namd'


# -- Options for LaTeX output ------------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',

    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'qmflows-namd.tex', 'qmflows-namd Documentation',
     'Felipe Zapata and Ivan Infante', 'manual'),
]


# -- Options for manual page output ------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'qmflows-namd', 'qmflows-namd Documentation',
     [author], 1)
]


# -- Options for Texinfo output ----------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'qmflows-namd', 'qmflows-namd Documentation',
     author, 'qmflows-namd', 'One line description of project.',
     'Miscellaneous'),
]


# -- Options for Epub output -------------------------------------------------

# Bibliographic Dublin Core info.
epub_title = project

# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''

# A unique identification for the text.
#
# epub_uid = ''

# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']


# -- Extension configuration -------------------------------------------------

# -- Options for intersphinx extension ---------------------------------------

# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}

# -- Options for todo extension ----------------------------------------------

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
felipeZ/nonAdiabaticCoupling
docs/conf.py
Python
mit
6,106
[ "NAMD" ]
d488628637e38c5bb28166b463f377b767164cead4ca5f37ccf71feb38e7eb27
# (C) British Crown Copyright 2010 - 2018, Met Office # # This file is part of Iris. # # Iris is free software: you can redistribute it and/or modify it under # the terms of the GNU Lesser General Public License as published by the # Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Iris is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with Iris. If not, see <http://www.gnu.org/licenses/>. """ Miscellaneous utility functions. """ from __future__ import (absolute_import, division, print_function) from six.moves import (filter, input, map, range, zip) # noqa import six import abc import collections from contextlib import contextmanager import copy import functools import inspect import os import os.path import sys import tempfile import time import cf_units import numpy as np import numpy.ma as ma import iris import iris.exceptions def broadcast_to_shape(array, shape, dim_map): """ Broadcast an array to a given shape. Each dimension of the array must correspond to a dimension in the given shape. Striding is used to repeat the array until it matches the desired shape, returning repeated views on the original array. If you need to write to the resulting array, make a copy first. Args: * array (:class:`numpy.ndarray`-like) An array to broadcast. * shape (:class:`list`, :class:`tuple` etc.): The shape the array should be broadcast to. * dim_map (:class:`list`, :class:`tuple` etc.): A mapping of the dimensions of *array* to their corresponding element in *shape*. *dim_map* must be the same length as the number of dimensions in *array*. Each element of *dim_map* corresponds to a dimension of *array* and its value provides the index in *shape* which the dimension of *array* corresponds to, so the first element of *dim_map* gives the index of *shape* that corresponds to the first dimension of *array* etc. Examples: Broadcasting an array of shape (2, 3) to the shape (5, 2, 6, 3) where the first dimension of the array corresponds to the second element of the desired shape and the second dimension of the array corresponds to the fourth element of the desired shape:: a = np.array([[1, 2, 3], [4, 5, 6]]) b = broadcast_to_shape(a, (5, 2, 6, 3), (1, 3)) Broadcasting an array of shape (48, 96) to the shape (96, 48, 12):: # a is an array of shape (48, 96) result = broadcast_to_shape(a, (96, 48, 12), (1, 0)) """ if len(dim_map) != array.ndim: # We must check for this condition here because we cannot rely on # getting an error from numpy if the dim_map argument is not the # correct length, we might just get a segfault. raise ValueError('dim_map must have an entry for every ' 'dimension of the input array') def _broadcast_helper(a): strides = [0] * len(shape) for idim, dim in enumerate(dim_map): if shape[dim] != a.shape[idim]: # We'll get garbage values if the dimensions of array are not # those indicated by shape. raise ValueError('shape and array are not compatible') strides[dim] = a.strides[idim] return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides) array_view = _broadcast_helper(array) if ma.isMaskedArray(array): if array.mask is ma.nomask: # Degenerate masks can be applied as-is. 
mask_view = array.mask else: # Mask arrays need to be handled in the same way as the data array. mask_view = _broadcast_helper(array.mask) array_view = ma.array(array_view, mask=mask_view) return array_view def delta(ndarray, dimension, circular=False): """ Calculates the difference between values along a given dimension. Args: * ndarray: The array over which to do the difference. * dimension: The dimension over which to do the difference on ndarray. * circular: If not False then return n results in the requested dimension with the delta between the last and first element included in the result otherwise the result will be of length n-1 (where n is the length of ndarray in the given dimension's direction) If circular is numeric then the value of circular will be added to the last element of the given dimension if the last element is negative, otherwise the value of circular will be subtracted from the last element. The example below illustrates the process:: original array -180, -90, 0, 90 delta (with circular=360): 90, 90, 90, -270+360 .. note:: The difference algorithm implemented is forward difference: >>> import numpy as np >>> import iris.util >>> original = np.array([-180, -90, 0, 90]) >>> iris.util.delta(original, 0) array([90, 90, 90]) >>> iris.util.delta(original, 0, circular=360) array([90, 90, 90, 90]) """ if circular is not False: _delta = np.roll(ndarray, -1, axis=dimension) last_element = [slice(None, None)] * ndarray.ndim last_element[dimension] = slice(-1, None) if not isinstance(circular, bool): result = np.where(ndarray[last_element] >= _delta[last_element])[0] _delta[last_element] -= circular _delta[last_element][result] += 2*circular np.subtract(_delta, ndarray, _delta) else: _delta = np.diff(ndarray, axis=dimension) return _delta def describe_diff(cube_a, cube_b, output_file=None): """ Prints the differences that prevent compatibility between two cubes, as defined by :meth:`iris.cube.Cube.is_compatible()`. Args: * cube_a: An instance of :class:`iris.cube.Cube` or :class:`iris.cube.CubeMetadata`. * cube_b: An instance of :class:`iris.cube.Cube` or :class:`iris.cube.CubeMetadata`. * output_file: A :class:`file` or file-like object to receive output. Defaults to sys.stdout. .. seealso:: :meth:`iris.cube.Cube.is_compatible()` .. note:: Compatibility does not guarantee that two cubes can be merged. Instead, this function is designed to provide a verbose description of the differences in metadata between two cubes. Determining whether two cubes will merge requires additional logic that is beyond the scope of this function. 
""" if output_file is None: output_file = sys.stdout if cube_a.is_compatible(cube_b): output_file.write('Cubes are compatible\n') else: common_keys = set(cube_a.attributes).intersection(cube_b.attributes) for key in common_keys: if np.any(cube_a.attributes[key] != cube_b.attributes[key]): output_file.write('"%s" cube_a attribute value "%s" is not ' 'compatible with cube_b ' 'attribute value "%s"\n' % (key, cube_a.attributes[key], cube_b.attributes[key])) if cube_a.name() != cube_b.name(): output_file.write('cube_a name "%s" is not compatible ' 'with cube_b name "%s"\n' % (cube_a.name(), cube_b.name())) if cube_a.units != cube_b.units: output_file.write( 'cube_a units "%s" are not compatible with cube_b units "%s"\n' % (cube_a.units, cube_b.units)) if cube_a.cell_methods != cube_b.cell_methods: output_file.write('Cell methods\n%s\nand\n%s\nare not compatible\n' % (cube_a.cell_methods, cube_b.cell_methods)) def guess_coord_axis(coord): """ Returns a "best guess" axis name of the coordinate. Heuristic categorisation of the coordinate into either label 'T', 'Z', 'Y', 'X' or None. Args: * coord: The :class:`iris.coords.Coord`. Returns: 'T', 'Z', 'Y', 'X', or None. """ axis = None if coord.standard_name in ('longitude', 'grid_longitude', 'projection_x_coordinate'): axis = 'X' elif coord.standard_name in ('latitude', 'grid_latitude', 'projection_y_coordinate'): axis = 'Y' elif (coord.units.is_convertible('hPa') or coord.attributes.get('positive') in ('up', 'down')): axis = 'Z' elif coord.units.is_time_reference(): axis = 'T' return axis def rolling_window(a, window=1, step=1, axis=-1): """ Make an ndarray with a rolling window of the last dimension Args: * a : array_like Array to add rolling window to Kwargs: * window : int Size of rolling window * step : int Size of step between rolling windows * axis : int Axis to take the rolling window over Returns: Array that is a view of the original array with an added dimension of the size of the given window at axis + 1. Examples:: >>> x = np.arange(10).reshape((2, 5)) >>> rolling_window(x, 3) array([[[0, 1, 2], [1, 2, 3], [2, 3, 4]], [[5, 6, 7], [6, 7, 8], [7, 8, 9]]]) Calculate rolling mean of last dimension:: >>> np.mean(rolling_window(x, 3), -1) array([[ 1., 2., 3.], [ 6., 7., 8.]]) """ # NOTE: The implementation of this function originates from # https://github.com/numpy/numpy/pull/31#issuecomment-1304851 04/08/2011 if window < 1: raise ValueError("`window` must be at least 1.") if window > a.shape[axis]: raise ValueError("`window` is too long.") if step < 1: raise ValueError("`step` must be at least 1.") axis = axis % a.ndim num_windows = (a.shape[axis] - window + step) // step shape = a.shape[:axis] + (num_windows, window) + a.shape[axis + 1:] strides = (a.strides[:axis] + (step * a.strides[axis], a.strides[axis]) + a.strides[axis + 1:]) rw = np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides) if ma.isMaskedArray(a): mask = ma.getmaskarray(a) strides = (mask.strides[:axis] + (step * mask.strides[axis], mask.strides[axis]) + mask.strides[axis + 1:]) rw = ma.array(rw, mask=np.lib.stride_tricks.as_strided( mask, shape=shape, strides=strides)) return rw def array_equal(array1, array2): """ Returns whether two arrays have the same shape and elements. This provides the same functionality as :func:`numpy.array_equal` but with additional support for arrays of strings. 
""" array1, array2 = np.asarray(array1), np.asarray(array2) if array1.shape != array2.shape: eq = False else: eq = bool(np.asarray(array1 == array2).all()) return eq def approx_equal(a, b, max_absolute_error=1e-10, max_relative_error=1e-10): """ Returns whether two numbers are almost equal, allowing for the finite precision of floating point numbers. """ # Deal with numbers close to zero if abs(a - b) < max_absolute_error: return True # Ensure we get consistent results if "a" and "b" are supplied in the # opposite order. max_ab = max([a, b], key=abs) relative_error = abs(a - b) / max_ab return relative_error < max_relative_error def between(lh, rh, lh_inclusive=True, rh_inclusive=True): """ Provides a convenient way of defining a 3 element inequality such as ``a < number < b``. Arguments: * lh The left hand element of the inequality * rh The right hand element of the inequality Keywords: * lh_inclusive - boolean Affects the left hand comparison operator to use in the inequality. True for ``<=`` false for ``<``. Defaults to True. * rh_inclusive - boolean Same as lh_inclusive but for right hand operator. For example:: between_3_and_6 = between(3, 6) for i in range(10): print(i, between_3_and_6(i)) between_3_and_6 = between(3, 6, rh_inclusive=False) for i in range(10): print(i, between_3_and_6(i)) """ if lh_inclusive and rh_inclusive: return lambda c: lh <= c <= rh elif lh_inclusive and not rh_inclusive: return lambda c: lh <= c < rh elif not lh_inclusive and rh_inclusive: return lambda c: lh < c <= rh else: return lambda c: lh < c < rh def reverse(array, axes): """ Reverse the array along the given axes. Args: * array The array to reverse * axes A single value or array of values of axes to reverse :: >>> import numpy as np >>> a = np.arange(24).reshape(2, 3, 4) >>> print(a) [[[ 0 1 2 3] [ 4 5 6 7] [ 8 9 10 11]] <BLANKLINE> [[12 13 14 15] [16 17 18 19] [20 21 22 23]]] >>> print(reverse(a, 1)) [[[ 8 9 10 11] [ 4 5 6 7] [ 0 1 2 3]] <BLANKLINE> [[20 21 22 23] [16 17 18 19] [12 13 14 15]]] >>> print(reverse(a, [1, 2])) [[[11 10 9 8] [ 7 6 5 4] [ 3 2 1 0]] <BLANKLINE> [[23 22 21 20] [19 18 17 16] [15 14 13 12]]] """ index = [slice(None, None)] * array.ndim axes = np.array(axes, ndmin=1) if axes.ndim != 1: raise ValueError('Reverse was expecting a single axis or a 1d array ' 'of axes, got %r' % axes) if np.min(axes) < 0 or np.max(axes) > array.ndim-1: raise ValueError('An axis value out of range for the number of ' 'dimensions from the given array (%s) was received. ' 'Got: %r' % (array.ndim, axes)) for axis in axes: index[axis] = slice(None, None, -1) return array[tuple(index)] def monotonic(array, strict=False, return_direction=False): """ Return whether the given 1d array is monotonic. Note that, the array must not contain missing data. Kwargs: * strict (boolean) Flag to enable strict monotonic checking * return_direction (boolean) Flag to change return behaviour to return (monotonic_status, direction). Direction will be 1 for positive or -1 for negative. The direction is meaningless if the array is not monotonic. Returns: * monotonic_status (boolean) Whether the array was monotonic. 
If the return_direction flag was given then the returned value will be: ``(monotonic_status, direction)`` """ if array.ndim != 1 or len(array) <= 1: raise ValueError('The array to check must be 1 dimensional and have ' 'more than 1 element.') if ma.isMaskedArray(array) and ma.count_masked(array) != 0: raise ValueError('The array to check contains missing data.') # Identify the directions of the largest/most-positive and # smallest/most-negative steps. d = np.diff(array) sign_max_d = np.sign(np.max(d)) sign_min_d = np.sign(np.min(d)) if strict: monotonic = sign_max_d == sign_min_d and sign_max_d != 0 else: monotonic = (sign_min_d < 0 and sign_max_d <= 0) or \ (sign_max_d > 0 and sign_min_d >= 0) or \ (sign_min_d == sign_max_d == 0) if return_direction: if sign_max_d == 0: direction = sign_min_d else: direction = sign_max_d return monotonic, direction return monotonic def column_slices_generator(full_slice, ndims): """ Given a full slice full of tuples, return a dictionary mapping old data dimensions to new and a generator which gives the successive slices needed to index correctly (across columns). This routine deals with the special functionality for tuple based indexing e.g. [0, (3, 5), :, (1, 6, 8)] by first providing a slice which takes the non tuple slices out first i.e. [0, :, :, :] then subsequently iterates through each of the tuples taking out the appropriate slices i.e. [(3, 5), :, :] followed by [:, :, (1, 6, 8)] This method was developed as numpy does not support the direct approach of [(3, 5), : , (1, 6, 8)] for column based indexing. """ list_of_slices = [] # Map current dimensions to new dimensions, or None dimension_mapping = {None: None} _count_current_dim = 0 for i, i_key in enumerate(full_slice): if isinstance(i_key, (int, np.integer)): dimension_mapping[i] = None else: dimension_mapping[i] = _count_current_dim _count_current_dim += 1 # Get all of the dimensions for which a tuple of indices were provided # (numpy.ndarrays are treated in the same way tuples in this case) def is_tuple_style_index(key): return (isinstance(key, tuple) or (isinstance(key, np.ndarray) and key.ndim == 1)) tuple_indices = [i for i, key in enumerate(full_slice) if is_tuple_style_index(key)] # stg1: Take a copy of the full_slice specification, turning all tuples # into a full slice if tuple_indices != list(range(len(full_slice))): first_slice = list(full_slice) for tuple_index in tuple_indices: first_slice[tuple_index] = slice(None, None) # turn first_slice back into a tuple ready for indexing first_slice = tuple(first_slice) list_of_slices.append(first_slice) # stg2 iterate over each of the tuples for tuple_index in tuple_indices: # Create a list with the indices to span the whole data array that we # currently have spanning_slice_with_tuple = [slice(None, None)] * _count_current_dim # Replace the slice(None, None) with our current tuple spanning_slice_with_tuple[dimension_mapping[tuple_index]] = \ full_slice[tuple_index] # if we just have [(0, 1)] turn it into [(0, 1), ...] as this is # Numpy's syntax. if len(spanning_slice_with_tuple) == 1: spanning_slice_with_tuple.append(Ellipsis) spanning_slice_with_tuple = tuple(spanning_slice_with_tuple) list_of_slices.append(spanning_slice_with_tuple) # return the dimension mapping and a generator of slices return dimension_mapping, iter(list_of_slices) def _build_full_slice_given_keys(keys, ndim): """ Given the keys passed to a __getitem__ call, build an equivalent tuple of keys which span ndims. 
""" # Ensure that we always have a tuple of keys if not isinstance(keys, tuple): keys = tuple([keys]) # catch the case where an extra Ellipsis has been provided which can be # discarded iff len(keys)-1 == ndim if len(keys)-1 == ndim and \ Ellipsis in filter(lambda obj: not isinstance(obj, np.ndarray), keys): keys = list(keys) is_ellipsis = [key is Ellipsis for key in keys] keys.pop(is_ellipsis.index(True)) keys = tuple(keys) # for ndim >= 1 appending a ":" to the slice specification is allowable, # remove this now if len(keys) > ndim and ndim != 0 and keys[-1] == slice(None, None): keys = keys[:-1] if len(keys) > ndim: raise IndexError('More slices requested than dimensions. Requested ' '%r, but there were only %s dimensions.' % (keys, ndim)) # For each dimension get the slice which has been requested. # If no slice provided, then default to the whole dimension full_slice = [slice(None, None)] * ndim for i, key in enumerate(keys): if key is Ellipsis: # replace any subsequent Ellipsis objects in keys with # slice(None, None) as per Numpy keys = keys[:i] + tuple([slice(None, None) if key is Ellipsis else key for key in keys[i:]]) # iterate over the remaining keys in reverse to fill in # the gaps from the right hand side for j, key in enumerate(keys[:i:-1]): full_slice[-j-1] = key # we've finished with i now so stop the iteration break else: full_slice[i] = key # remove any tuples on dimensions, turning them into numpy array's for # consistent behaviour full_slice = tuple([np.array(key, ndmin=1) if isinstance(key, tuple) else key for key in full_slice]) return full_slice def _slice_data_with_keys(data, keys): """ Index an array-like object as "data[keys]", with orthogonal indexing. Args: * data (array-like): array to index. * keys (list): list of indexes, as received from a __getitem__ call. This enforces an orthogonal interpretation of indexing, which means that both 'real' (numpy) arrays and other array-likes index in the same way, instead of numpy arrays doing 'fancy indexing'. Returns (dim_map, data_region), where : * dim_map (dict) : A dimension map, as returned by :func:`column_slices_generator`. i.e. "dim_map[old_dim_index]" --> "new_dim_index" or None. * data_region (array-like) : The sub-array. .. Note:: Avoids copying the data, where possible. """ # Combines the use of _build_full_slice_given_keys and # column_slices_generator. # By slicing on only one index at a time, this also mostly avoids copying # the data, except some cases when a key contains a list of indices. n_dims = len(data.shape) full_slice = _build_full_slice_given_keys(keys, n_dims) dims_mapping, slices_iter = column_slices_generator(full_slice, n_dims) for this_slice in slices_iter: data = data[this_slice] if data.ndim > 0 and min(data.shape) < 1: # Disallow slicings where a dimension has no points, like "[5:5]". raise IndexError('Cannot index with zero length slice.') return dims_mapping, data def _wrap_function_for_method(function, docstring=None): """ Returns a wrapper function modified to be suitable for use as a method. The wrapper function renames the first argument as "self" and allows an alternative docstring, thus allowing the built-in help(...) routine to display appropriate output. """ # Generate the Python source for the wrapper function. # NB. The first argument is replaced with "self". 
args, varargs, varkw, defaults = inspect.getargspec(function) if defaults is None: basic_args = ['self'] + args[1:] default_args = [] simple_default_args = [] else: cutoff = -len(defaults) basic_args = ['self'] + args[1:cutoff] default_args = ['%s=%r' % pair for pair in zip(args[cutoff:], defaults)] simple_default_args = args[cutoff:] var_arg = [] if varargs is None else ['*' + varargs] var_kw = [] if varkw is None else ['**' + varkw] arg_source = ', '.join(basic_args + default_args + var_arg + var_kw) simple_arg_source = ', '.join(basic_args + simple_default_args + var_arg + var_kw) source = ('def %s(%s):\n return function(%s)' % (function.__name__, arg_source, simple_arg_source)) # Compile the wrapper function # NB. There's an outstanding bug with "exec" where the locals and globals # dictionaries must be the same if we're to get closure behaviour. my_locals = {'function': function} exec(source, my_locals, my_locals) # Update the docstring if required, and return the modified function wrapper = my_locals[function.__name__] if docstring is None: wrapper.__doc__ = function.__doc__ else: wrapper.__doc__ = docstring return wrapper class _MetaOrderedHashable(abc.ABCMeta): """ A metaclass that ensures that non-abstract subclasses of _OrderedHashable without an explicit __init__ method are given a default __init__ method with the appropriate method signature. Also, an _init method is provided to allow subclasses with their own __init__ constructors to initialise their values via an explicit method signature. NB. This metaclass is used to construct the _OrderedHashable class as well as all its subclasses. """ def __new__(cls, name, bases, namespace): # We only want to modify concrete classes that have defined the # "_names" property. if '_names' in namespace and \ not isinstance(namespace['_names'], abc.abstractproperty): args = ', '.join(namespace['_names']) # Ensure the class has a constructor with explicit arguments. if '__init__' not in namespace: # Create a default __init__ method for the class method_source = ('def __init__(self, %s):\n ' 'self._init_from_tuple((%s,))' % (args, args)) exec(method_source, namespace) # Ensure the class has a "helper constructor" with explicit # arguments. if '_init' not in namespace: # Create a default _init method for the class method_source = ('def _init(self, %s):\n ' 'self._init_from_tuple((%s,))' % (args, args)) exec(method_source, namespace) return super(_MetaOrderedHashable, cls).__new__( cls, name, bases, namespace) @functools.total_ordering class _OrderedHashable(six.with_metaclass(_MetaOrderedHashable, collections.Hashable)): """ Convenience class for creating "immutable", hashable, and ordered classes. Instance identity is defined by the specific list of attribute names declared in the abstract attribute "_names". Subclasses must declare the attribute "_names" as an iterable containing the names of all the attributes relevant to equality/hash-value/ordering. Initial values should be set by using :: self._init(self, value1, value2, ..) .. note:: It's the responsibility of the subclass to ensure that the values of its attributes are themselves hashable. """ @abc.abstractproperty def _names(self): """ Override this attribute to declare the names of all the attributes relevant to the hash/comparison semantics. 
""" pass def _init_from_tuple(self, values): for name, value in zip(self._names, values): object.__setattr__(self, name, value) def __repr__(self): class_name = type(self).__name__ attributes = ', '.join('%s=%r' % (name, value) for (name, value) in zip(self._names, self._as_tuple())) return '%s(%s)' % (class_name, attributes) def _as_tuple(self): return tuple(getattr(self, name) for name in self._names) # Prevent attribute updates def __setattr__(self, name, value): raise AttributeError('Instances of %s are immutable' % type(self).__name__) def __delattr__(self, name): raise AttributeError('Instances of %s are immutable' % type(self).__name__) # Provide hash semantics def _identity(self): return self._as_tuple() def __hash__(self): return hash(self._identity()) def __eq__(self, other): return (isinstance(other, type(self)) and self._identity() == other._identity()) def __ne__(self, other): # Since we've defined __eq__ we should also define __ne__. return not self == other # Provide default ordering semantics def __lt__(self, other): if isinstance(other, _OrderedHashable): return self._identity() < other._identity() else: return NotImplemented def create_temp_filename(suffix=''): """Return a temporary file name. Args: * suffix - Optional filename extension. """ temp_file = tempfile.mkstemp(suffix) os.close(temp_file[0]) return temp_file[1] def clip_string(the_str, clip_length=70, rider="..."): """ Returns a clipped version of the string based on the specified clip length and whether or not any graceful clip points can be found. If the string to be clipped is shorter than the specified clip length, the original string is returned. If the string is longer than the clip length, a graceful point (a space character) after the clip length is searched for. If a graceful point is found the string is clipped at this point and the rider is added. If no graceful point can be found, then the string is clipped exactly where the user requested and the rider is added. Args: * the_str The string to be clipped * clip_length The length in characters that the input string should be clipped to. Defaults to a preconfigured value if not specified. * rider A series of characters appended at the end of the returned string to show it has been clipped. Defaults to a preconfigured value if not specified. Returns: The string clipped to the required length with a rider appended. If the clip length was greater than the orignal string, the original string is returned unaltered. """ if clip_length >= len(the_str) or clip_length <= 0: return the_str else: if the_str[clip_length].isspace(): return the_str[:clip_length] + rider else: first_part = the_str[:clip_length] remainder = the_str[clip_length:] # Try to find a graceful point at which to trim i.e. a space # If no graceful point can be found, then just trim where the user # specified by adding an empty slice of the remainder ( [:0] ) termination_point = remainder.find(" ") if termination_point == -1: termination_point = 0 return first_part + remainder[:termination_point] + rider def format_array(arr): """ Returns the given array as a string, using the python builtin str function on a piecewise basis. Useful for xml representation of arrays. For customisations, use the :mod:`numpy.core.arrayprint` directly. """ summary_insert = "" summary_threshold = 85 edge_items = 3 ffunc = str formatArray = np.core.arrayprint._formatArray max_line_len = 50 legacy = '1.13' if arr.size > summary_threshold: summary_insert = '...' 
options = np.get_printoptions() options['legacy'] = legacy with _printopts_context(**options): result = formatArray( arr, ffunc, max_line_len, next_line_prefix='\t\t', separator=', ', edge_items=edge_items, summary_insert=summary_insert, legacy=legacy) return result @contextmanager def _printopts_context(**kwargs): """ Update the numpy printoptions for the life of this context manager. Note: this function can be removed with numpy>=1.15 thanks to https://github.com/numpy/numpy/pull/10406 """ original_opts = np.get_printoptions() np.set_printoptions(**kwargs) try: yield finally: np.set_printoptions(**original_opts) def new_axis(src_cube, scalar_coord=None): """ Create a new axis as the leading dimension of the cube, promoting a scalar coordinate if specified. Args: * src_cube (:class:`iris.cube.Cube`) Source cube on which to generate a new axis. Kwargs: * scalar_coord (:class:`iris.coord.Coord` or 'string') Scalar coordinate to promote to a dimension coordinate. Returns: A new :class:`iris.cube.Cube` instance with one extra leading dimension (length 1). For example:: >>> cube.shape (360, 360) >>> ncube = iris.util.new_axis(cube, 'time') >>> ncube.shape (1, 360, 360) """ if scalar_coord is not None: scalar_coord = src_cube.coord(scalar_coord) # Indexing numpy arrays requires loading deferred data here returning a # copy of the data with a new leading dimension. # If the source cube is a Masked Constant, it is changed here to a Masked # Array to allow the mask to gain an extra dimension with the data. if src_cube.has_lazy_data(): new_cube = iris.cube.Cube(src_cube.lazy_data()[None]) else: if isinstance(src_cube.data, ma.core.MaskedConstant): new_data = ma.array([np.nan], mask=[True]) else: new_data = src_cube.data[None] new_cube = iris.cube.Cube(new_data) new_cube.metadata = src_cube.metadata for coord in src_cube.aux_coords: if scalar_coord and scalar_coord == coord: dim_coord = iris.coords.DimCoord.from_coord(coord) new_cube.add_dim_coord(dim_coord, 0) else: dims = np.array(src_cube.coord_dims(coord)) + 1 new_cube.add_aux_coord(coord.copy(), dims) for coord in src_cube.dim_coords: coord_dims = np.array(src_cube.coord_dims(coord)) + 1 new_cube.add_dim_coord(coord.copy(), coord_dims) for factory in src_cube.aux_factories: new_cube.add_aux_factory(copy.deepcopy(factory)) return new_cube def as_compatible_shape(src_cube, target_cube): """ Return a cube with added length one dimensions to match the dimensionality and dimension ordering of `target_cube`. This function can be used to add the dimensions that have been collapsed, aggregated or sliced out, promoting scalar coordinates to length one dimension coordinates where necessary. It operates by matching coordinate metadata to infer the dimensions that need modifying, so the provided cubes must have coordinates with the same metadata (see :class:`iris.coords.CoordDefn`). .. note:: This function will load and copy the data payload of `src_cube`. Args: * src_cube: An instance of :class:`iris.cube.Cube` with missing dimensions. * target_cube: An instance of :class:`iris.cube.Cube` with the desired dimensionality. Returns: A instance of :class:`iris.cube.Cube` with the same dimensionality as `target_cube` but with the data and coordinates from `src_cube` suitably reshaped to fit. 
""" dim_mapping = {} for coord in target_cube.aux_coords + target_cube.dim_coords: dims = target_cube.coord_dims(coord) try: collapsed_dims = src_cube.coord_dims(coord) except iris.exceptions.CoordinateNotFoundError: continue if collapsed_dims: if len(collapsed_dims) == len(dims): for dim_from, dim_to in zip(dims, collapsed_dims): dim_mapping[dim_from] = dim_to elif dims: for dim_from in dims: dim_mapping[dim_from] = None if len(dim_mapping) != target_cube.ndim: raise ValueError('Insufficient or conflicting coordinate ' 'metadata. Cannot infer dimension mapping ' 'to restore cube dimensions.') new_shape = [1] * target_cube.ndim for dim_from, dim_to in six.iteritems(dim_mapping): if dim_to is not None: new_shape[dim_from] = src_cube.shape[dim_to] new_data = src_cube.data.copy() # Transpose the data (if necessary) to prevent assignment of # new_shape doing anything except adding length one dims. order = [v for k, v in sorted(dim_mapping.items()) if v is not None] if order != sorted(order): new_order = [order.index(i) for i in range(len(order))] new_data = np.transpose(new_data, new_order).copy() new_cube = iris.cube.Cube(new_data.reshape(new_shape)) new_cube.metadata = copy.deepcopy(src_cube.metadata) # Record a mapping from old coordinate IDs to new coordinates, # for subsequent use in creating updated aux_factories. coord_mapping = {} reverse_mapping = {v: k for k, v in dim_mapping.items() if v is not None} def add_coord(coord): """Closure used to add a suitably reshaped coord to new_cube.""" all_dims = target_cube.coord_dims(coord) src_dims = [dim for dim in src_cube.coord_dims(coord) if src_cube.shape[dim] > 1] mapped_dims = [reverse_mapping[dim] for dim in src_dims] length1_dims = [dim for dim in all_dims if new_cube.shape[dim] == 1] dims = length1_dims + mapped_dims shape = [new_cube.shape[dim] for dim in dims] if not shape: shape = [1] points = coord.points.reshape(shape) bounds = None if coord.has_bounds(): bounds = coord.bounds.reshape(shape + [coord.nbounds]) new_coord = coord.copy(points=points, bounds=bounds) # If originally in dim_coords, add to dim_coords, otherwise add to # aux_coords. if target_cube.coords(coord, dim_coords=True): try: new_cube.add_dim_coord(new_coord, dims) except ValueError: # Catch cases where the coord is an AuxCoord and therefore # cannot be added to dim_coords. new_cube.add_aux_coord(new_coord, dims) else: new_cube.add_aux_coord(new_coord, dims) coord_mapping[id(coord)] = new_coord for coord in src_cube.aux_coords + src_cube.dim_coords: add_coord(coord) for factory in src_cube.aux_factories: new_cube.add_aux_factory(factory.updated(coord_mapping)) return new_cube def squeeze(cube): """ Removes any dimension of length one. If it has an associated DimCoord or AuxCoord, this becomes a scalar coord. Args: * cube (:class:`iris.cube.Cube`) Source cube to remove length 1 dimension(s) from. Returns: A new :class:`iris.cube.Cube` instance without any dimensions of length 1. For example:: >>> cube.shape (1, 360, 360) >>> ncube = iris.util.squeeze(cube) >>> ncube.shape (360, 360) """ slices = [0 if cube.shape[dim] == 1 else slice(None) for dim in range(cube.ndim)] squeezed = cube[tuple(slices)] return squeezed def file_is_newer_than(result_path, source_paths): """ Return whether the 'result' file has a later modification time than all of the 'source' files. If a stored result depends entirely on known 'sources', it need only be re-built when one of them changes. This function can be used to test that by comparing file timestamps. 
Args: * result_path (string): The filepath of a file containing some derived result data. * source_paths (string or iterable of strings): The path(s) to the original datafiles used to make the result. May include wildcards and '~' expansions (like Iris load paths), but not URIs. Returns: True if all the sources are older than the result, else False. If any of the file paths describes no existing files, an exception will be raised. .. note:: There are obvious caveats to using file timestamps for this, as correct usage depends on how the sources might change. For example, a file could be replaced by one of the same name, but an older timestamp. If wildcards and '~' expansions are used, this introduces even more uncertainty, as then you cannot even be sure that the resulting list of file names is the same as the originals. For example, some files may have been deleted or others added. .. note:: The result file may often be a :mod:`pickle` file. In that case, it also depends on the relevant module sources, so extra caution is required. Ideally, an additional check on iris.__version__ is advised. """ # Accept a string as a single source path if isinstance(source_paths, six.string_types): source_paths = [source_paths] # Fix our chosen timestamp function file_date = os.path.getmtime # Get the 'result file' time result_timestamp = file_date(result_path) # Get all source filepaths, with normal Iris.io load helper function source_file_paths = iris.io.expand_filespecs(source_paths) # Compare each filetime, for each spec, with the 'result time' for path in source_file_paths: source_timestamp = file_date(path) if source_timestamp >= result_timestamp: return False return True def is_regular(coord): """Determine if the given coord is regular.""" try: regular_step(coord) except iris.exceptions.CoordinateNotRegularError: return False except (TypeError, ValueError): return False return True def regular_step(coord): """Return the regular step from a coord or fail.""" if coord.ndim != 1: raise iris.exceptions.CoordinateMultiDimError("Expected 1D coord") if coord.shape[0] < 2: raise ValueError("Expected a non-scalar coord") avdiff, regular = points_step(coord.points) if not regular: msg = "Coord %s is not regular" % coord.name() raise iris.exceptions.CoordinateNotRegularError(msg) return avdiff.astype(coord.points.dtype) def points_step(points): """Determine whether a NumPy array has a regular step.""" diffs = np.diff(points) avdiff = np.mean(diffs) # TODO: This value for `rtol` is set for test_analysis to pass... regular = np.allclose(diffs, avdiff, rtol=0.001) return avdiff, regular def unify_time_units(cubes): """ Performs an in-place conversion of the time units of all time coords in the cubes in a given iterable. One common epoch is defined for each calendar found in the cubes to prevent units being defined with inconsistencies between epoch and calendar. Each epoch is defined from the first suitable time coordinate found in the input cubes. Arg: * cubes: An iterable containing :class:`iris.cube.Cube` instances. """ epochs = {} for cube in cubes: for time_coord in cube.coords(): if time_coord.units.is_time_reference(): epoch = epochs.setdefault(time_coord.units.calendar, time_coord.units.origin) new_unit = cf_units.Unit(epoch, time_coord.units.calendar) time_coord.convert_units(new_unit) def _is_circular(points, modulus, bounds=None): """ Determine whether the provided points or bounds are circular in nature relative to the modulus value. 
If the bounds are provided then these are checked for circularity rather than the points. Args: * points: :class:`numpy.ndarray` of point values. * modulus: Circularity modulus value. Kwargs: * bounds: :class:`numpy.ndarray` of bound values. Returns: Boolean. """ circular = False if bounds is not None: # Set circular to True if the bounds ends are equivalent. first_bound = last_bound = None if bounds.ndim == 1 and bounds.shape[-1] == 2: first_bound = bounds[0] % modulus last_bound = bounds[1] % modulus elif bounds.ndim == 2 and bounds.shape[-1] == 2: first_bound = bounds[0, 0] % modulus last_bound = bounds[-1, 1] % modulus if first_bound is not None and last_bound is not None: circular = np.allclose(first_bound, last_bound, rtol=1.0e-5) else: # set circular if points are regular and last+1 ~= first if len(points) > 1: diffs = list(set(np.diff(points))) diff = np.mean(diffs) abs_tol = np.abs(diff * 1.0e-4) diff_approx_equal = np.max(np.abs(diffs - diff)) < abs_tol if diff_approx_equal: circular_value = (points[-1] + diff) % modulus try: np.testing.assert_approx_equal(points[0], circular_value, significant=4) circular = True except AssertionError: if points[0] == 0: try: np.testing.assert_approx_equal(modulus, circular_value, significant=4) circular = True except AssertionError: pass else: # XXX - Inherited behaviour from NetCDF PyKE rules. # We need to decide whether this is valid! circular = points[0] >= modulus return circular def promote_aux_coord_to_dim_coord(cube, name_or_coord): """ Promotes an AuxCoord on the cube to a DimCoord. This AuxCoord must be associated with a single cube dimension. If the AuxCoord is associated with a dimension that already has a DimCoord, that DimCoord gets demoted to an AuxCoord. Args: * cube An instance of :class:`iris.cube.Cube` * name_or_coord: Either (a) An instance of :class:`iris.coords.AuxCoord` or (b) the :attr:`standard_name`, :attr:`long_name`, or :attr:`var_name` of an instance of an instance of :class:`iris.coords.AuxCoord`. For example:: >>> print cube air_temperature / (K) (time: 12; latitude: 73; longitude: 96) Dimension coordinates: time x - - latitude - x - longitude - - x Auxiliary coordinates: year x - - >>> promote_aux_coord_to_dim_coord(cube, 'year') >>> print cube air_temperature / (K) (year: 12; latitude: 73; longitude: 96) Dimension coordinates: year x - - latitude - x - longitude - - x Auxiliary coordinates: time x - - """ if isinstance(name_or_coord, six.string_types): aux_coord = cube.coord(name_or_coord) elif isinstance(name_or_coord, iris.coords.Coord): aux_coord = name_or_coord else: # Don't know how to handle this type msg = ("Don't know how to handle coordinate of type {}. 
" "Ensure all coordinates are of type six.string_types or " "iris.coords.Coord.") msg = msg.format(type(name_or_coord)) raise TypeError(msg) if aux_coord in cube.dim_coords: # nothing to do return if aux_coord not in cube.aux_coords: msg = ("Attempting to promote an AuxCoord ({}) " "which does not exist in the cube.") msg = msg.format(aux_coord.name()) raise ValueError(msg) coord_dim = cube.coord_dims(aux_coord) if len(coord_dim) != 1: msg = ("Attempting to promote an AuxCoord ({}) " "which is associated with {} dimensions.") msg = msg.format(aux_coord.name(), len(coord_dim)) raise ValueError(msg) try: dim_coord = iris.coords.DimCoord.from_coord(aux_coord) except ValueError as valerr: msg = ("Attempt to promote an AuxCoord ({}) fails " "when attempting to create a DimCoord from the " "AuxCoord because: {}") msg = msg.format(aux_coord.name(), str(valerr)) raise ValueError(msg) old_dim_coord = cube.coords(dim_coords=True, contains_dimension=coord_dim[0]) if len(old_dim_coord) == 1: demote_dim_coord_to_aux_coord(cube, old_dim_coord[0]) # order matters here: don't want to remove # the aux_coord before have tried to make # dim_coord in case that fails cube.remove_coord(aux_coord) cube.add_dim_coord(dim_coord, coord_dim) def demote_dim_coord_to_aux_coord(cube, name_or_coord): """ Demotes a dimension coordinate on the cube to an auxiliary coordinate. The DimCoord is demoted to an auxiliary coordinate on the cube. The dimension of the cube that was associated with the DimCoord becomes anonymous. The class of the coordinate is left as DimCoord, it is not recast as an AuxCoord instance. Args: * cube An instance of :class:`iris.cube.Cube` * name_or_coord: Either (a) An instance of :class:`iris.coords.DimCoord` or (b) the :attr:`standard_name`, :attr:`long_name`, or :attr:`var_name` of an instance of an instance of :class:`iris.coords.DimCoord`. For example:: >>> print cube air_temperature / (K) (time: 12; latitude: 73; longitude: 96) Dimension coordinates: time x - - latitude - x - longitude - - x Auxiliary coordinates: year x - - >>> demote_dim_coord_to_aux_coord(cube, 'time') >>> print cube air_temperature / (K) (-- : 12; latitude: 73; longitude: 96) Dimension coordinates: latitude - x - longitude - - x Auxiliary coordinates: time x - - year x - - """ if isinstance(name_or_coord, six.string_types): dim_coord = cube.coord(name_or_coord) elif isinstance(name_or_coord, iris.coords.Coord): dim_coord = name_or_coord else: # Don't know how to handle this type msg = ("Don't know how to handle coordinate of type {}. " "Ensure all coordinates are of type six.string_types or " "iris.coords.Coord.") msg = msg.format(type(name_or_coord)) raise TypeError(msg) if dim_coord not in cube.dim_coords: # nothing to do return coord_dim = cube.coord_dims(dim_coord) cube.remove_coord(dim_coord) cube.add_aux_coord(dim_coord, coord_dim) @functools.wraps(np.meshgrid) def _meshgrid(*xi, **kwargs): """ @numpy v1.13, the dtype of each output nD coordinate is the same as its associated input 1D coordinate. This is not the case prior to numpy v1.13, where the output dtype is cast up to its highest resolution, regardlessly. This convenience function ensures consistent meshgrid behaviour across numpy versions. Reference: https://github.com/numpy/numpy/pull/5302 """ mxi = np.meshgrid(*xi, **kwargs) for i, (mxii, xii) in enumerate(zip(mxi, xi)): if mxii.dtype != xii.dtype: mxi[i] = mxii.astype(xii.dtype) return mxi
duncanwp/iris
lib/iris/util.py
Python
lgpl-3.0
52,689
[ "NetCDF" ]
e294469678cfaebcac41ec73fc1a9e4834af2a6fdab551d947242423cf4d785d
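A minimal standalone sketch (not part of iris itself; the function name here is my own) restating the regular-step test that points_step() and is_regular() in the record above perform: take the differences between successive points, average them, and treat the coordinate as regular only if every difference is close to that average under the same loose relative tolerance.

import numpy as np

def approx_regular_step(points):
    """Return (mean step, is_regular) for a 1D array of coordinate points."""
    diffs = np.diff(points)
    avdiff = np.mean(diffs)
    # Same loose relative tolerance as points_step() above; real-world
    # coordinates are often only approximately evenly spaced.
    regular = np.allclose(diffs, avdiff, rtol=0.001)
    return avdiff, regular

print(approx_regular_step(np.array([0.0, 2.5, 5.0, 7.5])))   # -> (2.5, True)
print(approx_regular_step(np.array([0.0, 2.5, 5.0, 9.0])))   # -> (3.0, False)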
""" AlwaysActivePolicy module """ from __future__ import absolute_import from __future__ import division from __future__ import print_function __RCSID__ = '$Id$' from DIRAC import S_OK from DIRAC.ResourceStatusSystem.PolicySystem.PolicyBase import PolicyBase class AlwaysActivePolicy(PolicyBase): """ The AlwaysActivePolicy is a dummy module that can be used as example, it always returns Active status. """ @staticmethod def _evaluate(commandResult): """ It returns Active status, evaluates the default command, but its output is completely ignored. """ policyResult = {'Status': 'Active', 'Reason': 'AlwaysActive'} return S_OK(policyResult)
yujikato/DIRAC
src/DIRAC/ResourceStatusSystem/Policy/AlwaysActivePolicy.py
Python
gpl-3.0
714
[ "DIRAC" ]
36406718e8fa4c24fca8b45a87364438862d6078cc15617884d1a45d7de8c810
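A minimal standalone sketch of the "always Active" pattern the module above implements, with no DIRAC dependency: the class and helper names below are assumptions, and s_ok() is only a stand-in for DIRAC's S_OK return convention. The point it illustrates is that _evaluate() ignores whatever the command produced and reports a fixed Active status.

def s_ok(value):
    # Stand-in for DIRAC's S_OK result convention (assumed shape).
    return {'OK': True, 'Value': value}

class AlwaysActivePolicySketch(object):
    @staticmethod
    def _evaluate(command_result):
        # The command result is deliberately ignored.
        return s_ok({'Status': 'Active', 'Reason': 'AlwaysActive'})

# Any command output yields the same policy result:
print(AlwaysActivePolicySketch._evaluate({'OK': False, 'Message': 'whatever'}))
# {'OK': True, 'Value': {'Status': 'Active', 'Reason': 'AlwaysActive'}}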
import datetime import urllib import os from django.utils import timezone from django.utils.timezone import utc from django.contrib.auth.models import User from funfactory.urlresolvers import reverse from nose.tools import eq_, ok_ from airmozilla.search.models import LoggedSearch from airmozilla.main.models import Event, UserProfile, Tag, Channel from airmozilla.base.tests.testbase import DjangoTestCase class TestSearch(DjangoTestCase): fixtures = ['airmozilla/manage/tests/main_testdata.json'] placeholder_path = 'airmozilla/manage/tests/firefox.png' placeholder = os.path.basename(placeholder_path) def test_basic_search(self): Event.objects.all().delete() self._upload_media(self.placeholder_path) today = timezone.now() event = Event.objects.create( title='Entirely Different', slug=today.strftime('test-event-%Y%m%d'), start_time=today, placeholder_img=self.placeholder, status=Event.STATUS_INITIATED, description="These are my words." ) assert event not in Event.objects.approved() url = reverse('search:home') response = self.client.get(url) eq_(response.status_code, 200) response = self.client.get(url, {'q': 'entirely'}) eq_(response.status_code, 200) ok_('Nothing found' in response.content) event.status = Event.STATUS_SCHEDULED event.save() assert event in Event.objects.approved() assert event.privacy == Event.PRIVACY_PUBLIC response = self.client.get(url, {'q': 'entirely'}) eq_(response.status_code, 200) ok_('Nothing found' not in response.content) ok_(event.title in response.content) ok_('value="entirely"' in response.content) def test_basic_search_with_privacy_filter(self): Event.objects.all().delete() today = timezone.now() event = Event.objects.create( title='Entirely Different', slug=today.strftime('test-event-%Y%m%d'), start_time=today, placeholder_img=self.placeholder, status=Event.STATUS_SCHEDULED, description="These are my words." ) assert event in Event.objects.approved() url = reverse('search:home') response = self.client.get(url, {'q': 'entirely'}) eq_(response.status_code, 200) ok_('Nothing found' not in response.content) ok_(event.title in response.content) event.privacy = Event.PRIVACY_CONTRIBUTORS event.save() response = self.client.get(url, {'q': 'entirely'}) eq_(response.status_code, 200) ok_('Nothing found' in response.content) contributor = User.objects.create_user( 'nigel', 'nigel@live.com', 'secret' ) user_profile = UserProfile.objects.create( user=contributor, contributor=True ) assert self.client.login(username='nigel', password='secret') response = self.client.get(url, {'q': 'entirely'}) eq_(response.status_code, 200) ok_('Nothing found' not in response.content) ok_(event.title in response.content) event.privacy = Event.PRIVACY_COMPANY event.save() response = self.client.get(url, {'q': 'entirely'}) eq_(response.status_code, 200) ok_('Nothing found' in response.content) user_profile.contributor = False user_profile.save() response = self.client.get(url, {'q': 'entirely'}) eq_(response.status_code, 200) ok_('Nothing found' not in response.content) ok_(event.title in response.content) def test_search_ordering(self): Event.objects.all().delete() today = timezone.now() event1 = Event.objects.create( title='Entirely Different', slug=today.strftime('test-event-%Y%m%d'), start_time=today, placeholder_img=self.placeholder, status=Event.STATUS_SCHEDULED, description="A different word is not mentioned here." 
) assert event1 in Event.objects.approved() yesterday = today - datetime.timedelta(days=1) event2 = Event.objects.create( title='Muscle Belts', slug=yesterday.strftime('test-event-%Y%m%d'), start_time=yesterday.replace(tzinfo=utc), placeholder_img=self.placeholder, status=Event.STATUS_SCHEDULED, description="The word entirely appears here" ) assert event2 in Event.objects.approved() url = reverse('search:home') response = self.client.get(url, {'q': 'entirely'}) eq_(response.status_code, 200) ok_('Nothing found' not in response.content) ok_(event1.title in response.content) ok_(event2.title in response.content) # event1 should appear ahead of event2 # because event1 has the word "entirely" in the title ok_(response.content.find(event1.title) < response.content.find(event2.title)) response = self.client.get(url, {'q': 'words'}) eq_(response.status_code, 200) ok_('Nothing found' not in response.content) ok_(event1.title in response.content) ok_(event2.title in response.content) # "word" appears in both but event1 is newer ok_(response.content.find(event1.title) < response.content.find(event2.title)) def test_search_by_stopwords(self): Event.objects.all().delete() today = timezone.now() event = Event.objects.create( title='THis is Different', slug=today.strftime('test-event-%Y%m%d'), start_time=today, placeholder_img=self.placeholder, status=Event.STATUS_SCHEDULED, description="These are my words." ) assert event in Event.objects.approved() url = reverse('search:home') response = self.client.get(url) eq_(response.status_code, 200) response = self.client.get(url, {'q': 'this'}) eq_(response.status_code, 200) ok_('Nothing found' in response.content) response = self.client.get(url, {'q': 'this is are these'}) eq_(response.status_code, 200) ok_('Nothing found' in response.content) response = self.client.get(url, {'q': 'are'}) eq_(response.status_code, 200) ok_('Nothing found' in response.content) def test_search_with_strange_characters(self): Event.objects.all().delete() today = timezone.now() event = Event.objects.create( title='THis is Different', slug=today.strftime('test-event-%Y%m%d'), start_time=today, placeholder_img=self.placeholder, status=Event.STATUS_SCHEDULED, description="These are my words." ) assert event in Event.objects.approved() url = reverse('search:home') # first check that specific words work response = self.client.get(url, {'q': 'WORD'}) eq_(response.status_code, 200) ok_('Nothing found' not in response.content) ok_(event.title in response.content) response = self.client.get(url, {'q': "O'Brian Should Work"}) eq_(response.status_code, 200) ok_('Nothing found' in response.content) response = self.client.get(url, {'q': 'are my'}) eq_(response.status_code, 200) ok_('Nothing found' in response.content) # this won't allow the automatic OR response = self.client.get(url, {'q': 'WORDS | LETTERS'}) eq_(response.status_code, 200) ok_('Nothing found' in response.content) response = self.client.get(url, {'q': 'WORDS & LETTERS'}) eq_(response.status_code, 200) ok_('Nothing found' in response.content) response = self.client.get(url, {'q': 'WORDS LETTERS'}) eq_(response.status_code, 200) ok_('Nothing found' not in response.content) ok_(event.title in response.content) def test_search_with_nothing(self): Event.objects.all().delete() today = timezone.now() event = Event.objects.create( title='THis is Different', slug=today.strftime('test-event-%Y%m%d'), start_time=today, placeholder_img=self.placeholder, status=Event.STATUS_SCHEDULED, description="These are my words." 
) assert event in Event.objects.approved() url = reverse('search:home') response = self.client.get(url, {'q': ''}) eq_(response.status_code, 200) ok_(event.title not in response.content) def test_search_by_stemming(self): Event.objects.all().delete() today = timezone.now() event = Event.objects.create( title='Engagement Discussion', slug=today.strftime('test-event-%Y%m%d'), start_time=today, placeholder_img=self.placeholder, status=Event.STATUS_SCHEDULED, description="These are my words." ) assert event in Event.objects.approved() url = reverse('search:home') response = self.client.get(url, {'q': 'discuss'}) eq_(response.status_code, 200) ok_('Nothing found' not in response.content) ok_(event.title in response.content) response = self.client.get(url, {'q': 'discussions'}) eq_(response.status_code, 200) ok_('Nothing found' not in response.content) ok_(event.title in response.content) response = self.client.get(url, {'q': 'engage'}) eq_(response.status_code, 200) ok_('Nothing found' not in response.content) ok_(event.title in response.content) def test_search_with_highlight(self): Event.objects.all().delete() today = timezone.now() event = Event.objects.create( title='Engagement Discussion', slug=today.strftime('test-event-%Y%m%d'), start_time=today, placeholder_img=self.placeholder, status=Event.STATUS_SCHEDULED, description="These are my words." ) assert event in Event.objects.approved() url = reverse('search:home') response = self.client.get(url, {'q': 'discuss'}) eq_(response.status_code, 200) ok_('Nothing found' not in response.content) ok_('<b>Discussion</b>' in response.content) event.title += ' <input name="foo">' event.save() response = self.client.get(url, {'q': 'discuss'}) eq_(response.status_code, 200) ok_('Nothing found' not in response.content) ok_('<b>Discussion</b>' in response.content) ok_('<input name="foo">' not in response.content) response = self.client.get(url, {'q': 'WORD'}) eq_(response.status_code, 200) ok_('Nothing found' not in response.content) # because it's not the short description ok_('<b>words</b>' not in response.content) event.short_description = "These are your words." event.save() response = self.client.get(url, {'q': 'WORD'}) eq_(response.status_code, 200) ok_('Nothing found' not in response.content) ok_('<b>words</b>' in response.content) event.short_description += ' <script>alert("xxx")</script>' event.save() response = self.client.get(url, {'q': 'WORD'}) eq_(response.status_code, 200) ok_('Nothing found' not in response.content) ok_('<script>alert' not in response.content) def test_searching_multi_words_finding_with_or(self): Event.objects.all().delete() today = timezone.now() event1 = Event.objects.create( title='Blobber Fest', slug='blobbering', start_time=today, placeholder_img=self.placeholder, status=Event.STATUS_SCHEDULED, description="These are my words." ) assert event1 in Event.objects.approved() event2 = Event.objects.create( title='Beauty and the Beast', slug='beauty-and-beast', start_time=today, placeholder_img=self.placeholder, status=Event.STATUS_SCHEDULED, description="These are other words." 
) assert event2 in Event.objects.approved() url = reverse('search:home') response = self.client.get(url, {'q': 'BLOBBER'}) eq_(response.status_code, 200) ok_('Nothing found' not in response.content) ok_(event1.title in response.content) ok_(event2.title not in response.content) response = self.client.get(url, {'q': 'BEAST'}) eq_(response.status_code, 200) ok_('Nothing found' not in response.content) ok_(event1.title not in response.content) ok_(event2.title in response.content) response = self.client.get(url, {'q': 'BLOBBER BEAST'}) eq_(response.status_code, 200) ok_('Nothing found' not in response.content) ok_(event1.title in response.content) ok_(event2.title in response.content) def test_search_with_sql_injection(self): assert Event.objects.approved() url = reverse('search:home') q = '1" onmouseover=prompt(931357) bad="' response = self.client.get(url, {'q': q}) eq_(response.status_code, 200) ok_('Nothing found' in response.content) def test_search_by_transcript(self): assert Event.objects.approved() url = reverse('search:home') q = 'fingerfood' response = self.client.get(url, {'q': q}) eq_(response.status_code, 200) ok_('Nothing found' in response.content) event, = Event.objects.approved() event.transcript = 'I love fingerfoods' event.save() response = self.client.get(url, {'q': q}) eq_(response.status_code, 200) ok_('Nothing found' not in response.content) ok_(event.title in response.content) ok_('found by transcript' in response.content) # but if the event is found because of the description... event.short_description = "Peter talks about his love for fingerfood" event.save() response = self.client.get(url, {'q': q}) eq_(response.status_code, 200) ok_('Nothing found' not in response.content) ok_(event.title in response.content) ok_('found by transcript' not in response.content) def test_paginated_search(self): event = Event.objects.get(title='Test event') for i in range(14): Event.objects.create( title='Test event %d' % (i + 1), short_description=event.short_description, description=event.description, start_time=event.start_time, archive_time=event.archive_time, location=event.location, privacy=event.privacy, status=event.status, placeholder_img=event.placeholder_img, ) url = reverse('search:home') response = self.client.get(url, {'q': 'TEST EVENT'}) eq_(response.status_code, 200) eq_(response.content.count('Test event'), 10) response = self.client.get(url, {'q': 'TEST EVENT', 'page': 2}) eq_(response.status_code, 200) eq_(response.content.count('Test event'), 5) # but don't try to mess with it response = self.client.get(url, {'q': 'TEST EVENT', 'page': 0}) eq_(response.status_code, 400) def test_search_by_tag(self): assert Event.objects.approved() url = reverse('search:home') response = self.client.get(url, {'q': 'tag: mytag'}) eq_(response.status_code, 200) ok_('Nothing found' in response.content) tag = Tag.objects.create(name='mytag') event = Event.objects.get(title='Test event') event.tags.add(tag) response = self.client.get(url, {'q': 'tag: mytag'}) eq_(response.status_code, 200) ok_('Nothing found' not in response.content) ok_(event.title in response.content) # should work case insensitively response = self.client.get(url, {'q': 'TAG: MYTAG'}) eq_(response.status_code, 200) ok_('Nothing found' not in response.content) ok_(event.title in response.content) # combine with something else to be found response = self.client.get(url, {'q': 'Test tag:mytag'}) eq_(response.status_code, 200) ok_('Nothing found' not in response.content) ok_(event.title in response.content) # combine with 
something else to be NOT found response = self.client.get(url, {'q': 'Somethingelse tag:mytag'}) eq_(response.status_code, 200) ok_('Nothing found' in response.content) def test_search_and_suggest_tags(self): url = reverse('search:home') event = Event.objects.get(title='Test event') tag = Tag.objects.create(name='rust') event.tags.add(tag) response = self.client.get(url, {'q': 'RUst'}) eq_(response.status_code, 200) # because neither title or description contains this ok_('Nothing found' in response.content) tag_search_url = url + '?q=%s' % urllib.quote_plus('tag: rust') ok_(tag_search_url in response.content) # But searching for parts of the tag word should not suggest the # tag. # See https://bugzilla.mozilla.org/show_bug.cgi?id=1072985 response = self.client.get(url, {'q': 'rusty'}) eq_(response.status_code, 200) ok_(tag_search_url not in response.content) def test_search_and_suggest_multiple_tags(self): url = reverse('search:home') event = Event.objects.get(title='Test event') tag = Tag.objects.create(name='mytag') event.tags.add(tag) othertag = Tag.objects.create(name='other tag') event.tags.add(othertag) response = self.client.get(url, {'q': 'mytag other tag'}) eq_(response.status_code, 200) # because neither title or description contains this ok_('Nothing found' in response.content) tag_search_url = ( url + '?q=%s' % urllib.quote_plus('other tag tag: mytag') ) ok_(tag_search_url in response.content) othertag_search_url = ( url + '?q=%s' % urllib.quote_plus('mytag tag: other tag') ) ok_(othertag_search_url in response.content) def test_search_by_channel(self): assert Event.objects.approved() url = reverse('search:home') event = Event.objects.get(title='Test event') response = self.client.get(url, {'q': 'channel: Grow Mozilla'}) eq_(response.status_code, 200) ok_('Nothing found' in response.content) channel = Channel.objects.create( name='Grow Mozilla', slug='gr-mozilla') event.channels.add(channel) response = self.client.get(url, {'q': 'channel: Grow Mozilla'}) eq_(response.status_code, 200) ok_('Nothing found' not in response.content) ok_(event.title in response.content) # should work case insensitively response = self.client.get(url, {'q': 'CHANNEL: GROW mozilla'}) eq_(response.status_code, 200) ok_('Nothing found' not in response.content) ok_(event.title in response.content) # combine with something else to be found response = self.client.get(url, {'q': 'Test channel:grow mozilla'}) eq_(response.status_code, 200) ok_('Nothing found' not in response.content) ok_(event.title in response.content) # combine with something else to be NOT found response = self.client.get( url, {'q': 'Somethingelse channel:grow mozilla'} ) eq_(response.status_code, 200) ok_('Nothing found' in response.content) def test_search_and_suggest_channels(self): url = reverse('search:home') event = Event.objects.get(title='Test event') channel = Channel.objects.create(name='Grow Mozilla') event.channels.add(channel) response = self.client.get(url, {'q': 'grow mozilla'}) eq_(response.status_code, 200) # because neither title or description contains this ok_('Nothing found' in response.content) channel_search_url = ( url + '?q=%s' % urllib.quote_plus('channel: Grow Mozilla') ) ok_(channel_search_url in response.content) # See https://bugzilla.mozilla.org/show_bug.cgi?id=1072985 Channel.objects.create(name='Rust', slug='rust') channel_search_url = ( url + '?q=%s' % urllib.quote_plus('y channel: Rust') ) response = self.client.get(url, {'q': 'rusty'}) eq_(response.status_code, 200) ok_(channel_search_url not in 
response.content) def test_logged_search(self): url = reverse('search:home') response = self.client.get(url, {'q': 'TesT'}) eq_(response.status_code, 200) ok_('Nothing found' not in response.content) logged_search = LoggedSearch.objects.get( term='TesT', results=1, page=1, ) # now after that, click on the found event event = Event.objects.get(title='Test event') event_url = reverse('main:event', args=(event.slug,)) ok_(event_url in response.content) response = self.client.get(event_url) eq_(response.status_code, 200) # using a session it should now record that that search # lead to clicking this event logged_search = LoggedSearch.objects.get(pk=logged_search.pk) eq_(logged_search.event_clicked, event) def test_logged_search_not_empty_searches(self): url = reverse('search:home') response = self.client.get(url, {'q': ''}) eq_(response.status_code, 200) ok_('Nothing found' not in response.content) ok_(not LoggedSearch.objects.all()) # or something too short response = self.client.get(url, {'q': '1'}) eq_(response.status_code, 200) ok_('Too short' in response.content) ok_(not LoggedSearch.objects.all()) response = self.client.get(url, {'q': ' ' * 10}) eq_(response.status_code, 200) ok_('Nothing found' not in response.content) ok_(not LoggedSearch.objects.all()) # but search by channel or tag without a wildcard should log response = self.client.get(url, {'q': 'channel: Foo'}) eq_(response.status_code, 200) ok_('Nothing found' in response.content) ok_(LoggedSearch.objects.all()) def test_unicode_next_page_links(self): """https://bugzilla.mozilla.org/show_bug.cgi?id=1079370""" event = Event.objects.get(title='Test event') for i in range(20): Event.objects.create( title=u'T\xe4st event %d' % (i + 1), short_description=event.short_description, description=event.description, start_time=event.start_time, archive_time=event.archive_time, location=event.location, privacy=event.privacy, status=event.status, placeholder_img=event.placeholder_img, ) url = reverse('search:home') response = self.client.get(url, {'q': u'T\xe4sT'}) eq_(response.status_code, 200) ok_('Nothing found' not in response.content) def test_event_channels(self): # tests if channels were in events from search event = Event.objects.get(title='Test event') channel = Channel.objects.create( name='TestChannel', slug='test-channel') event.channels.add(channel) url = reverse('search:home') response = self.client.get(url, {'q': 'Test event'}) eq_(response.status_code, 200) ok_(channel.slug in response.content) ok_(channel.name in response.content)
bugzPDX/airmozilla
airmozilla/search/tests/test_views.py
Python
bsd-3-clause
24,403
[ "Brian" ]
282ab5db87054340ea21055a1cf6715faeea727eb0290294840f298283aabac2
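A minimal sketch (a hypothetical helper, not airmozilla's actual implementation) of the behaviour that test_search_with_highlight above asserts: markup coming from event titles or descriptions is escaped first, and case-insensitive matches of the search term are then wrapped in <b>...</b>, so "<b>Discussion</b>" appears in the page while injected "<script>" tags do not.

import re

def escape(text):
    # Neutralise any markup that came from the event title / description.
    return (text.replace('&', '&amp;')
                .replace('<', '&lt;')
                .replace('>', '&gt;'))

def highlight(text, term):
    pattern = re.compile(re.escape(term), re.IGNORECASE)
    return pattern.sub(lambda m: '<b>%s</b>' % m.group(0), escape(text))

print(highlight('Engagement Discussion', 'discussion'))
# Engagement <b>Discussion</b>
print(highlight('Title <script>alert("xxx")</script>', 'title'))
# <b>Title</b> &lt;script&gt;alert("xxx")&lt;/script&gt;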
# -*- coding: utf-8 -*- { '"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" je opcioni izraz kao sto je "polje1=\'nova vrijednost\'". Ne mozete azurirati ili izbristi rezultati JOIN-a', '# of Houses Damaged': 'Broj oštećenih kuća', '# of Houses Destroyed': 'Broj uništenih kuća', '# of International Staff': 'Broj međunarodnog osoblja', '# of National Staff': 'Broj nacionalnog osoblja', '# of People Deceased': 'Broj prenminulih ljudi', '# of People Injured': 'Broj povrijeđenih osoba', '# of Vehicles': 'Broj vozila', '# Results per query': 'Broj rezultata po upitu', '# selected': '# odabrano', '%(app)s not installed. Ask the Server Administrator to install on Server.': '%(app)s nije instaliran. Pitajte administratora servera da vam to instalira na serveru.', '%(count)s Recipients': '%(count)s primalaca', '%(count)s Roles of the user removed': '%(count)s uloga korisnika obrisano', '%(count)s Users removed from Role': '%(count)s korisnika izbačeno iz uloge', '%(count_of)d translations have been imported to the %(language)s language file': '%(count_of)d prijevoda je uvezeno u %(language)s jezičku datoteku', '%(GRN)s Number': '%(GRN)s broj', '%(GRN)s Status': '%(GRN)s Status', '%(item)s requested from %(site)s': '%(item)s zahtijevano sa %(site)s', '%(label)s contains %(values)s': '%(label)s sadrži %(values)s', '%(label)s contains any of %(values)s': '%(label)s sadrži jedno od %(values)s', '%(label)s does not contain %(values)s': '%(label)s ne sadrži %(values)s', '%(label)s is %(values)s': '%(label)s je %(values)s', '%(label)s like %(values)s': '%(label)s želi %(values)s', '%(label)s not like %(values)s': '%(label)s ne želiu %(values)s', '%(msg)s\nIf the request type is "%(type)s", please enter the %(type)s on the next screen.': '%(msg)s\nAko je tip zahtjeva "%(type)s", molim unesite %(type)s na slijedećem ekranu.', '%(msg)s\r\nIf the request type is "%(type)s", please enter the %(type)s on the next screen.': '%(msg)s\r\nAko je tip zahtjeva "%(type)s", molim unesite %(type)s na slijedećem ekranu.', '%(pe)s in %(location)s': '%(pe)s u %(location)s', '%(PO)s Number': '%(PO)s broj', '%(proj4js)s definition': '%(proj4js)s definicija', '%(quantity)s in stock': '%(quantity)s na zalihi', '%(REQ)s Number': '%(REQ)s broj', '%(resource)s Filter': '%(resource)s Filter', '%(site)s (Recipient)': '%(site)s (primaoc)', '%(site)s has no items exactly matching this request. There may still be other items in stock which can fulfill this request!': '%(site)s nema stavki koje odgovaraju ovom zahtjevu. Možda ima drugih stavki koje mogu ispuniti ovaj zahtjev!', '%(site_label)s Status': '%(site_label)s Status', '%(site_label)s Status added': '%(site_label)s Status dodan', '%(site_label)s Status deleted': '%(site_label)s Status obrisan', '%(site_label)s Status updated': '%(site_label)s Status ažuriran', '%(system_name)s - New User Registered': '%(system_name)s - Novi korisnik registrovan', '%(system_name)s - New User Registration Approval Pending': '%(system_name)s - Zahtjev za registracijom novog korisnika', '%(system_name)s - Verify Email': '%(system_name) - Potvrdite vaš Email', '%(system_name)s has sent an email to %(email)s to verify your email address.nPlease check your email to verify this address. If you do not receive this email please check you junk email or spam filters.': '%(system_name)s je poslao poštu za %(email)s da provjeru vašu adresu elektronske pošte.Molim provjerite vašu elektronsku poštu da ovjerite ovu adresu. 
Ako ne primite poruku elektronske pošte, provjerite vaše spam filtere ili poruke u smeću.', '%m-%d-%Y': '%d-%m-%Y', '%m-%d-%Y %H:%M:%S': '%m-%d-%Y %H:%M:%S', '%s linked to %s': '%s vezan za %s', '%s or %s': '%s ili %s', '%s rows deleted': '%s redova uklonjeno', '%s rows updated': '%s redova ažurirano', '%s selected': '%s izabrano', '%Y-%m-%d': '%d-%m-%Y', '%Y-%m-%d %H:%M': '%d-%m-%Y %H:%M', '%Y-%m-%d %H:%M:%S': '%d-%m-%Y %H:%M:%S', '%Y-%m-%d %H:%M:00': '%d.%m.%Y. %H:%M:00', '& then click on the map below to adjust the Lat/Lon fields': 'i pritisnete na mapu ispod za podešavanje geografske Dužine/Širine', "'%s %%{row} deleted',nrows": "'%s %%{row} obrisan',nrows", "'%s %%{row} updated',nrows": "'%s %%{row} ažurirano',nrows", "'Cancel' will indicate an asset log entry did not occur": "'Otkaži' će indicirati na to se unos sredstva u zapisnik nije desio.", '(filtered from _MAX_ total entries)': '(filtrirano iz _MAX_ elemenata)', '* Required Fields': '* Obavezna polja', '...or add a new bin': '...ili dodaj novu korpu', '0-15 minutes': '0-15 minuta', '1 Assessment': '1 Procjena', '1 location, shorter time, can contain multiple Tasks': '1 lokacija, kraće vrijeme, može sadržavati više Zadataka', '1-3 days': '1-3 dana', '1. Fill the necessary fields in BLOCK CAPITAL letters.': '1. Popunite potrebna polja VELIKIM SLOVIMA.', '15-30 minutes': '15-30 minuta', '2 different options are provided here currently:': '2 različite opcije su pružene trenutno:', '2. Always use one box per letter and leave one box space to separate words.': '2. Uvijek koristite jednu kućicu po slovu i koristite praznu kućicu da odvajate riječi', '2x4 Car': '2x4 auto', '3. Fill in the circles completely.': '3. Kružiće potpuno popunite', '30-60 minutes': '30 do 60 minuta', '3W Report': '3W izvještaj', '4-7 days': '4-7 dana', '4x4 Car': '4x4 auto', '8-14 days': '8-14 dana', '_NUM_ duplicates found': '_NUM_ duplikata nađeno', 'A block of rich text which could be embedded into a page, viewed as a complete page or viewed as a list of news items.': 'Blok bogatog teksta koji se može ugraditi u stranicu, vidljiv kao potpuna strana ili vidljiv kao lista novih stavki.', 'A brief description of the group (optional)': 'Kratki opis grupe (proizvoljno)', 'A catalog of different Assessment Templates including summary information': 'Katalog raznih predložaka procjena uključujući sumarne informacije', 'A collection of Feature Classes which can be displayed together on a map or exported together.': 'Skup klasa karakteristika koje se mogu prikazati na mapi ili izvesti zajedno', 'A file downloaded from a GPS containing a series of geographic points in XML format.': 'Dokument skinut iz GPS-a koji sadrži geografske lokacije u XML formatu', 'A file in GPX format taken from a GPS whose timestamps can be correlated with the timestamps on the photos to locate them on the map.': 'Datoteka u GPX formatu uzeta iz GPS-a čije vremenske oznake mogu biti povezane sa vremenskim oznakama na slikama da bi ih locirali na mapi', 'A file in GPX format taken from a GPS.': 'Datoteka u GPX formatu uzeta s GPS.', 'A library of digital resources, such as photos, documents and reports': 'Biblioteka digitalnih izvora, kao što su fotografije, dokumenti i izvješća', 'A location group can be used to define the extent of an affected area, if it does not fall within one administrative region.': 'Grupa lokacija se može iskoristiti da se definiše obim pogođene oblasti, ako ne spada unutar jedne administrativne regije.', 'A location group is a set of locations (often, a set of 
administrative regions representing a combined area).': 'Lokacijska grupa je skup lokacija (često set administrativnih regija koje predstavljaju kombinovano područje).', 'A location group is a set of locations (often, a set of administrative regions representing a combined area). Member locations are added to a location group here. Location groups may be used to filter what is shown on the map and in search results to only entities covered by locations in the group. A location group can be used to define the extent of an affected area, if it does not fall within one administrative region. Location groups can be used in the Regions menu.': 'Grupa lokacija je skup lokacija (često, skup administrativnih regija koji predstavlja kombinovano područje). Članske lokacije se dodaje grupi lokacija ovdje. Grupe lokacija se mogu koristiti za filtriranje onoga što je prikazano na karti i na rezultate pretraživanja samo po mjestima unutar grupe lokacija. Grupe lokacija se mogu koristiti za definiranje područja na ugroženom području, ako one ne spadaju u jedan administrativni region. Grupe lokacija mogu se koristiti u meniju regiona.', 'A location group must have at least one member.': 'Grupa lokacije mora imati bar jednog člana', "A location that specifies the geographic area for this region. This can be a location from the location hierarchy, or a 'group location', or a location that has a boundary for the area.": 'Lokacija koja određuje geografsko područje ove regije. Ovo može biti mjesto iz lokacijske hijerarhije , ili grupna lokacija , ili lokacija na granici područja', 'A Marker assigned to an individual Location is set if there is a need to override the Marker assigned to the Feature Class.': 'Oznaka je dodijeljena pojedinačnoj lokaciji u slučaju da postoji potreba za zamjenu oznake dodijeljene Klasi karakteristika.', 'A place within a Site like a Shelf, room, bin number etc.': 'Tačka na mjestu, poput police, sobe, broja korpe itd.', 'A project milestone marks a significant date in the calendar which shows that progress towards the overall objective is being made.': 'Projektni miljokaz predstavlja značajan datum u kalendaru koji pokazuje da je napredak prema glavnom cilju postignut.', 'A Reference Document such as a file, URL or contact person to verify this data.': 'Prateći dokument u vidu datoteke, URL-a ili kontakt osobe za potvrdu ovih podataka.', 'A Reference Document such as a file, URL or contact person to verify this data. You can type the 1st few characters of the document name to link to an existing document.': 'Referentni dokument, poput datoteke, URL-a ili kontakt osobe da se verificiraju ovi podaci. 
Možete ukucati prvih nekoliko karaktera naziva dokumenta da bi se povezalo s postojećim dokumentom.', 'A strict location hierarchy cannot have gaps.': 'Stroga hijerarhija lokacija ne može imati rupa', 'A task is a piece of work that an individual or team can do in 1-2 days': 'Zadatak je dio posla koji se samostalno ili u timu može završiti za 1-2 dana.', 'A task is a piece of work that an individual or team can do in 1-2 days.': 'Zadatak je dio posla koji se samostalno ili u timu može završiti za 1-2 dana.', 'A Warehouse is a physical place to store items.': 'Skladište je fizičko mjesto za smještanje predmeta.', 'Abbreviation': 'Skraćenica', 'Ability to customize the list of details tracked at a Shelter': 'Sposobnost da se prilagodi lista detalja praćenih u skloništu', 'Ability to customize the list of human resource tracked at a Shelter': 'Sposobnost prilagođavanja liste ljudskih resursa praćene u skloništu', 'Ability to customize the list of important facilities needed at a Shelter': 'Sposobnost da se prilagodi lista važnih objekata potrebnih u skloništu', 'Ability to Fill Out Surveys': 'Mogućnost ispunjavanja ankete', 'Ability to view Results of Completed and/or partially filled out Surveys': 'Mogućnost pregleda rezultata završenih i/ili djelimično popunjenih anketa', 'Abkhazia': 'Abhazija', 'Able to Respond?': 'U mogućnosti odgovoriti?', 'About': 'O programu', 'ABOUT': 'O', 'About Sahana': 'O Sahana', 'About Sahana Eden': 'O Sahana Eden', 'ABOUT THIS MODULE': 'O OVOM MODULU', 'About this module': 'O ovom modulu', 'Above %s': 'Iznad %s', 'Academic': 'Akademska', 'Accept Push': 'Prihvati guranje', 'Accept unsolicited data transmissions from the repository.': 'Prihvati neplanirane prenose podataka iz repozitorija.', 'ACCESS DATA': 'PRISTUPNI PODACI', 'Access denied': 'Zabranjen pristup', 'Access to education services': 'Pristup obrazovnim uslugama', 'Access to Shelter': 'Pristup skloništu', 'Accessibility of Affected Location': 'Pristupnost pogođenih lokacija', 'Accompanying Relative': 'Član rodbine koji je pratnja', 'Account added': 'Nalog dodana', 'Account Registered - Please Check Your Email': 'Korisnički račun registrovan - molimo provjerite svoj Email', 'Account registered, however registration is still pending approval - please wait until confirmation received.': 'Korisnički nalog registrovan, ali prijava još čeka odobrenje od ovlaštene osobe - molimo pričekajte dok se prijava ne odobri.', 'Accuracy': 'Preciznost', 'Acronym': 'Akronim', "Acronym of the organization's name, eg. 
IFRC.": 'Akronim od naziva organizacije, npr IFRC.', 'act': 'akt', 'Action': 'Akcija', 'ACTION REQUIRED': 'AKCIJA POTREBNA', 'Actionable': 'Djelatno', 'Actionable by all targeted recipients': 'Ima razloga da se djeluje prema svim ciljanim primateljima', 'Actionable only by designated exercise participants; exercise identifier SHOULD appear in <note>': 'Moguće pokrenuti samo od strane imenovanih učesnika vježbe; identifikator vježbe treba da se pojavi u polju <note>', 'Actioned?': 'Riješeno?', 'Actioning officer': 'Zaduženi službenik', 'Actions': 'Akcije', 'Actions taken as a result of this request.': 'Akcije preduzete kao rezultat ovog zahtjeva.', 'Activate': 'Aktiviraj', 'Activate Events from Scenario templates for allocation of appropriate Resources (Human, Assets & Facilities).': 'Aktiviraj događaje iz šablona scenarija za alokaciju odgovarajućih resursa (ljudi, sredstva i objekti).', 'activate to sort column ascending': 'aktivno za sortiranje kolone u rastućem redoslijedu', 'activate to sort column descending': 'aktivno za sortiranje kolone u opadajućem redoslijedu', 'active': 'aktivno', 'Active': 'Aktivno', 'Active Problems': 'Aktivni problemi', 'Active?': 'Aktivan?', 'Activities': 'Aktivnosti', 'Activities matching Assessments': 'Aktivnosti koje odgovaraju procjenama', 'Activities matching Assessments:': 'Aktivnosti koje odgovaraju procjenama:', 'Activities of boys 13-17yrs before disaster': 'Aktivnosti dječaka dobi 13-17 godina prije katastrofe', 'Activities of boys 13-17yrs now': 'Aktivnosti dječaka između 13 i 17 godina', 'Activities of boys <12yrs before disaster': 'Aktivnosti dječaka mlađih od 12 godina prije nepogode', 'Activities of boys <12yrs now': 'Trenutne aktivnosti dječaka <12 godina', 'Activities of children': 'Aktivnosti djece', 'Activities of girls 13-17yrs before disaster': 'Aktivnosti djevojaka 13-17 godina prije katastrofe', 'Activities of girls 13-17yrs now': 'Trenutne aktivnosti djevojčica između 13 i 17 godina', 'Activities of girls <12yrs before disaster': 'Aktivnosti djevojčica mlađih od 12 godina prije katastrofe', 'Activities of girls <12yrs now': 'Aktivnosti djevojčica <12god sada', 'Activities:': 'Aktivnosti:', 'Activity': 'Aktivnost', 'Activity Added': 'Dodana aktivnost', 'Activity added': 'Dodana aktivnost', 'Activity Deleted': 'Obrisana aktivnost', 'Activity Details': 'Detalji aktivnosti', 'Activity Organization': 'Organizacija aktivnosti', 'Activity Organization Added': 'Dodana organizacija aktivnosti', 'Activity Organization Deleted': 'Organizacija aktivnosti obrisana', 'Activity Organization Updated': 'Organizacija aktivnosti ažurirana', 'Activity Organizations': 'Organizacije aktivnosti', 'Activity removed': 'Aktivnost uklonjena', 'Activity Report': 'Izvještaj o aktivnosti', 'Activity Reports': 'Izvještaji aktivnosti', 'Activity Type': 'Tip aktivnosti', 'Activity Type Added': 'Dodan tip aktivnosti', 'Activity Type added to Activity': 'Tip aktivnosti dodan u aktivnost', 'Activity Type added to Project Location': 'Vrsta aktivnosti dodana na lokaciju projekta', 'Activity Type Deleted': 'Izbrisan tip aktivnosti', 'Activity Type removed from Activity': 'Tip aktivnosti uklonjen iz aktivnosti', 'Activity Type removed from Project Location': 'Vrsta aktivnosti uklonjena iz lokacije projekta', 'Activity Type Updated': 'Ažuriran tip aktivnosti', 'Activity Types': 'Tipovi aktivnosti', 'Activity Updated': 'Djelatnost ažurirana', 'Activity updated': 'Aktivnost ažurirana', 'Add': 'Dodati', 'Add %(site_label)s Status': 'Dodaj %(site_label)s status', 'Add a new 
certificate to the catalog.': 'Dodaj novi certifikat u katalog', 'Add a new competency rating to the catalog.': 'Dodaj novu ocjenu sposobnosti u katalog.', 'Add a new course to the catalog.': 'Dodaj novi kurs u katalog', 'Add a New Inventory Location': 'Dodaj novu lokaciju skladišta', 'Add a new job role to the catalog.': 'Dodaj novu poziciju u katalog', 'Add a new program to the catalog.': 'Dodaj novi program u katalog', 'Add a New Relief Item': 'Dodaj novu stavku pomoći', 'Add a new Site from where the Item is being sent.': 'Navedite mjesto gdje se šalje ova stavka.', 'Add a new skill provision to the catalog.': 'Dodaj novu zalihu vještina u katalog.', 'Add a new skill to the catalog.': 'Dodaj novu vještinu u katalog', 'Add a new skill type to the catalog.': 'Dodaj novi tip vještine u katalog', 'Add a new vehicle category': 'Dodaj novu kategoriju vozila', 'Add a new vehicle type': 'Dodaj novi tip vozila.', 'Add a Person': 'Dodaj osobu', 'Add a Reference Document such as a file, URL or contact person to verify this data.': 'Dodaj prateći dokument u vidu datoteke, URL-a ili kontakt osobe za potvrdu ovih podataka.', 'Add a Reference Document such as a file, URL or contact person to verify this data. If you do not enter a Reference Document, your email will be displayed instead.': 'Dodajte referencu, kao što je dokument, URL ili kontakt osobu da potvrdi ove podatke. Ako ne stavite referencu, prikazat će se vaš mail.', 'Add a Volunteer': 'Dodaj volontera', 'Add Activity': 'Dodaj aktivnost', 'Add Activity Report': 'Dodaj izvještaj o radu', 'Add Activity Type': 'Dodaj tip aktivnosti', 'Add Address': 'Dodaj Adresu', 'Add Affiliation': 'Dodaj namještenje', 'Add Aid Request': 'Dodaj zahtjev za pomoć', 'Add all organizations which are involved in different roles in this project': 'Dodaj sve organizacije koje su uključene u različite uloge u ovom projektu', 'Add Alternative Item': 'Dodaj alternativnu stavku', 'Add an image, such as a Photo.': 'Dodaj sliku,kao sto je fotografija', 'Add an Photo.': 'Dodaj fotografiju.', 'Add Annual Budget': 'Dodaj godišnji budžet', 'Add Appraisal': 'Dodaj ispunjenje', 'Add Assessment': 'Dodaj procjenu', 'Add Assessment Summary': 'Dodaj sažetak o procjeni', 'Add Asset': 'Dodaj sredstvo', 'Add Asset Log Entry - Change Label': 'Dodaj stavku zapisnika sredstava - promijeni oznaku', 'Add Availability': 'Dodaj dostupnost', 'Add Award': 'Dodaj nagradu', 'Add Baseline': 'Dodaj referentnu tačku', 'Add Baseline Type': 'Dodaj tip referentne tačke', 'Add Bed Type': 'Dodaj vrstu ležaja', 'Add Beneficiaries': 'Dodaj korisnike', 'Add Bin Type': 'Dodaj tip korpe', 'Add Bins': 'Dodaj korpe', 'Add Bookmark': 'Dodaj zabilješku', 'Add Branch Organization': 'Dodaj pripadnu organizaciju', 'Add Brand': 'Dodaj marku proizvoda', 'Add Budget': 'Dodaj budžet', 'Add Bundle': 'Dodaj paket', 'Add Camp': 'Dodaj kamp', 'Add Camp Service': 'Dodaj uslugu kampa', 'Add Camp Status': 'Dodaj status kampa', 'Add Camp Type': 'Dodaj tip kampa', 'Add Campaign Message': 'Dodaj poruku kampanje', 'Add Catalog': 'Dodaj Katalog', 'Add Catalog Item': 'Dodaj katalog stavku', 'Add Catalog.': 'Dodaj katalog.', 'Add Category': 'Dodaj kategoriju', 'Add Category<>Sub-Category<>Catalog Relation': 'Dodaj Kategorija<>Podkategorija<>kataloški odnos', 'Add Certificate': 'Dodaj certifikat', 'Add Certificate for Course': 'Dodaj certifikat za kurs', 'Add Certification': 'Dodja certifikat', 'Add Cholera Treatment Capability Information': 'Dodajte informacije o sposobnosti liječenja kolere', 'Add Cluster': 'Dodaj skup', 'Add 
Cluster Subsector': 'Dodaj podsektor skupa', 'Add Competency': 'Dodaj stručnost', 'Add Competency Rating': 'Dodaj ocjenu kompetentnosti', 'Add Config': 'Dodaj konfiguraciju.', 'Add Contact': 'Dodaj kontakt', 'Add Contact Information': 'Dodajte kontakt informacije', 'Add Course': 'Dodaj kurs', 'Add Course Certicate': 'Dodaj certifikat kursa', 'Add Credential': 'Dodaj akreditiv', 'Add Credentials': 'Dodaj akreditive', 'Add Data to Theme Layer': 'Dodaj podatke tematskom sloju', 'Add Dead Body Report': 'Dodaj izvještaj o preminulim osobama', 'Add Disaster Victims': 'Dodaj žrtve nepogode', 'Add Distribution': 'Dodaj distribuciju', 'Add Distribution Item': 'Dodaj stavku raspodjele', 'Add Distribution.': 'Dodaj distribuciju', 'Add Document': 'Dodaj dokument', 'Add Donation': 'Dodaj donaciju', 'Add Donor': 'Dodaj donatora', 'Add Education Detail': 'Dodaj detalje o obrazovanju', 'Add Education Level': 'Dodaj nivo obrazovanja', 'Add Email Account': 'Dodaj e-mail nalog', 'Add Facility': 'Dodaj objekat', 'Add Feature Class': 'Dodaj klasu karakteristika', 'Add Feature Group': 'Dodaj grupu karakteristika', 'Add Feature Layer': 'Dodaj sloj karakteristika', 'Add Find Report': 'Dodaj novi izvještaj o traženju', 'Add Flood Report': 'Dodaj izvještaj o poplavi', 'Add GIS Feature': 'Dodaj GIS karakteristiku', 'Add GPS data': 'Dodaj GPS podatke', 'Add Group': 'Dodaj grupu', 'Add Group Member': 'Dodaj člana grupe', 'Add Group Membership': 'Dodaj članstvo grupe', 'Add Hospital': 'Dodaj Bolnicu', 'Add Hours': 'Dodaj sate', 'Add Human Resource': 'Dodaj ljudski resurs', 'Add Identification Report': 'Dodaj izvještaj o identifikaciji', 'Add Identity': 'Dodaj identitet', 'Add Image': 'Dodaj sliku', 'Add Impact': 'Dodaj utjecaj', 'Add Impact Type': 'Dodaj tip utjecaja', 'Add Incident': 'Dodaj incident', 'Add Incident Report': 'Dodaj izvještaj o incidentu', 'Add Incoming Email': 'Dodaj dolaznu e-pošte', 'Add Incoming SMS': 'Dodaj dolazni SMS', 'Add Inventory Item': 'Dodaj stavkuinventara', 'Add Inventory Store': 'Dodaj novi smještaj inventara', 'Add Item': 'Dodaj stavku', 'Add Item (s)': 'Dodaj stavku', 'Add Item Catalog': 'Dodaj katalog stavki', 'Add Item Catalog Category': 'Dodaj kategoriju stavke kataloga', 'Add Item Category': 'Dodaj kategoriju stavke', 'Add Item Pack': 'Dodaj paket stavki', 'Add Item Packet': 'Dodaj paket stavki', 'Add Item Sub-Category': 'Dodaj podkategoriju stavke', 'Add Item to Catalog': 'Dodaj stavku u katalog', 'Add Item to Commitment': 'Dodaj stavku zaduženja', 'Add Item to Inventory': 'Dodaj stavku u inventar', 'Add Item to Request': 'Dodati stavku na zahtjev', 'Add Item to Shipment': 'Dodaj Stavku u Pošiljku', 'Add Item to Stock': 'Dodaj stavku u zalihu', 'Add Job Role': 'Dodajte poziciju za posao', 'Add Key': 'Dodaj ključ', 'Add Keyword': 'Dodaj ključnu riječ', 'Add Kit': 'Dodaj komplet', 'Add Layer': 'Dodaj sloj', 'Add Layer to this Profile': 'Dodaj sloj u profil', 'Add Level 1 Assessment': 'Dodaj procjenu nivoa 1', 'Add Level 2 Assessment': 'Dodaj 2. 
level procjene', 'Add Line': 'Dodaj liniju', 'Add Location': 'Dodaj Lokaciju', 'Add Locations': 'Dodaj lokacije', 'Add Log Entry': 'Dodaj stavku zapisnika', 'Add main Item Category.': 'Dodaj glavnu kategoriju stavke', 'Add main Item Sub-Category.': 'Dodaj glavnu podkategoriju stavke', 'Add Map Profile': 'Dodaj podešenje mape', 'Add Marker': 'Dodaj oznaku', 'Add Member': 'Dodaj novog člana', 'Add Membership': 'Dodaj članstvo', 'Add Message': 'Dodaj poruku', 'Add Metadata': 'Dodaj metapodatke', 'Add Mission': 'Dodaj misiju', 'Add Mobile Commons Settings': 'Dodaj mobilne postavke', 'Add Need': 'Dodaj potrebu', 'Add Need Type': 'Dodaj tip potrebe', 'Add New': 'Dodaj nov', 'Add New Activity': 'Dodaj novu aktivnost', 'Add New Address': 'Dodaj novu adresu', 'Add New Aid Request': 'Dodaj novi zahtjev za pomoć', 'Add New Alternative Item': 'Dodaj novi alternativni artikl', 'Add New Assessment': 'Dodaj novu procjenu', 'Add New Assessment Summary': 'Dodaj novi rezime procjene', 'Add New Asset': 'Dodaj novo sredstvo', 'Add New Baseline': 'Dodaj novu referentnu tačku', 'Add New Baseline Type': 'Dodaj novi tip referentne tačke', 'Add New Bin': 'Dodaj novu korpu', 'Add New Bin Type': 'Dodaj novi tip korpe', 'Add New Brand': 'Dodaj novu marku', 'Add New Budget': 'Dodaj novi budžet', 'Add New Bundle': 'Dodaj novi paket', 'Add New Camp': 'Dodaj Novi Kamp', 'Add New Camp Service': 'Dodaj novu uslugu Kampa', 'Add New Camp Type': 'Dodaj novi tip kampa', 'Add New Catalog': 'dodaj novi katalog', 'Add New Catalog Item': 'Dodaj novu stavku kataloga', 'Add New Cluster': 'Dodaj novi skup', 'Add New Cluster Subsector': 'Dodajte novi podsektor skupa', 'Add New Commitment Item': 'Dodaj novo zaduženje', 'Add New Config': 'Dodaj novu konfiguraciju.', 'Add New Contact': 'Dodaj Novi Kontakt', 'Add New Credential': 'Dodaj novi akreditiv', 'Add New Distribution': 'Dodaj novu raspodjelu', 'Add New Document': 'Dodaj novi dokument', 'Add New Donor': 'Dodaj novog donatora', 'Add New Entry': 'Dodaj novi element', 'Add New Event': 'Dodaj novi događaj', 'Add New Facility': 'Dodaj novi objekt', 'Add New Feature Class': 'Dodaj novu klasu karakteristika', 'Add New Feature Group': 'Dodaj novu grupu karakteristika', 'Add New Feature Layer': 'Dodaj novi sloj karakteristika', 'Add New Find Report': 'Dodaj novi izvještaj o traženju', 'Add New Flood Report': 'Dodaj novi izvještaj o poplavi', 'Add New Group': 'Dodaj novu grupu', 'Add new Group': 'Dodaj novu grupu', 'Add New Group Membership': 'Dodaj novo članstvo grupe', 'Add New Home': 'Dodaj novi dom', 'Add New Hospital': 'Dodaj novu bolnicu', 'Add New Human Resource': 'Dodaj novi ljudski resurs', 'Add New Identity': 'Dodaj novi identitet', 'Add New Image': 'Dodaj novu sliku', 'Add New Impact': 'Dodaj novi utjecaj', 'Add New Impact Type': 'Dodaj novi tip utjecaja', 'Add New Incident': 'Dodaj novi incident', 'Add New Incident Report': 'Dodaj novi izvještaj o incidentu', 'Add new Individual': 'Dodaj novu osobu', 'Add New Information': 'Dodaj nove informacija', 'Add New Inventory Item': 'Dodaj novi artikl u inventar', 'Add New Inventory Store': 'Dodaj novi smještaj inventara', 'Add New Item': 'Dodajte novu stavku', 'Add New Item Catalog': 'Kreiraj novu stavku kataloga', 'Add New Item Catalog Category': 'Dodaj novu kategoriju stavke kataloga', 'Add New Item Category': 'Dodaj novu kategoriju', 'Add New Item Pack': 'Dodaj novi paket stavki', 'Add New Item Packet': 'Dodaj novi paket staviki', 'Add New Item Sub-Category': 'Dodaj novu podkategoriju stavke', 'Add New Item to Kit': 'Dodaj novu stavku 
u komplet', 'Add New Key': 'Dodaj novi ključ', 'Add New Kit': 'Dodaj novi komplet', 'Add New Layer': 'Dodaj novi sloj', 'Add New Level 1 Assessment': 'Dodaj Procjenu za Novi Nivo 1', 'Add New Level 2 Assessment': 'Dodaj novu procjenu nivoa 2', 'Add New Location': 'Dodaj novu lokaciju', 'Add New Log Entry': 'Dodaj novi unos zapisnika', 'Add New Map Profile': 'Dodajte novu konfiguraciju plana', 'Add New Marker': 'Dodaj novi marker', 'Add New Member': 'Dodajte Novog Člana', 'Add New Membership': 'Dodaj novo članstvo', 'Add New Metadata': 'Dodaj novi metapodatak ', 'Add New Need': 'Dodajte novu potrebu', 'Add New Need Type': 'Dodaj Novi Tip Potrebe', 'Add New Note': 'Kreiraj novu bilješku', 'Add New Office': 'Dodaj novi ured', 'Add New Organization': 'Dodaj novu organizaciju', 'Add New Partner': 'Dodaj novog partnera', 'Add new Patient': 'Dodaj novog pacijenta', 'Add New Patient': 'Dodaj novog pacijenta', 'Add New Peer': 'Dodaj novog suradnika', 'Add New Person': 'Dodaj novu osobu.', 'Add New Person to Commitment': 'Dodaj novu osobu u zaduženje', 'Add new person.': 'Dodaj novu osobu.', 'Add New Photo': 'Dodaj novu fotografiju', 'Add New Population Statistic': 'Dodati novu statistiku populacije', 'Add new position.': 'Dodaj novu poziciju.', 'Add New Problem': 'Dodaj novi problem', 'Add New Project': 'Dodaj novi projekt', 'Add new project.': 'Dodaj novi projekat', 'Add New Projection': 'Dodaj novu projekciju', 'Add New Rapid Assessment': 'dodaj novu brzu procjenu', 'Add New Received Item': 'Dodaj novi primljeni predmet', 'Add New Record': 'Dodaj novi zapis', 'Add New Relative': 'Dodaj novog srodnika', 'Add New Relief Item': 'Dodaj novu stavku pomoći', 'Add New Report': 'Dodaj novi izvještaj', 'Add New Request': 'Dodaj novi zahtjev', 'Add New Request Item': 'Dodaj novu stavku zahtjeva', 'Add New Resource': 'Dodaj novi resurs', 'Add New Resource Type': 'Dodaj novi tip resursa', 'Add New Response': 'Dodaj novi odgovor', 'Add New River': 'dodaj novu rijeku', 'Add New Role': 'Dodaj novu ulogu', 'Add New Role to User': 'Dodajte novu ulogu korisniku', 'Add New Room': 'Dodaj novu prostoriju', 'Add New Scenario': 'Dodaj novi scenarij', 'Add New School District': 'Dodaj novi školski rejon', 'Add New School Report': 'Dodaj novi izvještaj o školama', 'Add New Sent Item': 'Dodaj novu poslanu stavku', 'Add New Setting': 'Dodaj novu postavku', 'Add New Shelter': 'Dodaj novo sklonište', 'Add New Shelter Service': 'Dodavanje nove usluge skloništa', 'Add New Shelter Type': 'Dodaj novi tip skloništa', 'Add New Shipment to Send': 'Dodaj novu pošiljku u slanje', 'Add New Site': 'Dodaj novo mjesto', 'Add New Skill': 'Dodaj novu vještinu', 'Add New Skill Type': 'Dodaj novi tip vještine', 'Add New Solution': 'Dodaj novo rješenje', 'Add New Source': 'Dodaj novi izvor', 'Add New Staff': 'Dodaj novo osoblje', 'Add New Staff Member': 'Dodaj novog člana osoblja', 'Add new staff role.': 'Dodati u ulogu osoblja', 'Add New Staff Type': 'Dodaj novi tip osoblja', 'Add new staff.': 'Dodaj novo osoblje.', 'Add New Storage Location': 'Dodaj novu lokaciju o smještaju', 'Add New Subsector': 'Dodaj novi podsektor', 'Add New Survey Answer': 'Dodaj novi odgovor za anketu', 'Add New Survey Question': 'Dodaj novo anketno pitanje', 'Add New Survey Section': 'Dodaj novo pitanje za anketu', 'Add New Survey Series': 'Dodaj novi niz anketa', 'Add New Survey Template': 'Dodajte novi ankentni šablon', 'Add New Task': 'Dodaj novi zadatak', 'Add New Team': 'Dodaj novi tim', 'Add New Theme': 'Dodaj novu temu', 'Add New Ticket': 'Dodaj novu 
karticu', 'Add New Track': 'Dodaj novo praćenje', 'Add New Unit': 'Dodaj novu jedinicu', 'Add New User': 'Dodaj novog korisnika', 'Add New User to Group': 'Doda novog korisnika u grupu', 'Add New User to Role': 'Dodaj novog korisnika ulozi', 'Add New Vehicle': 'Dodaj novo vozilo', 'Add New Vehicle Type': 'Dodaj novi tip VOZILA', 'Add New Volunteer': 'Dodaj novog volontera', 'Add New Warehouse': 'Dodajte novo skladište', 'Add New Warehouse Item': 'Dodaj novu stavku skladišta', 'Add Note': 'Dodaj bilješku', 'Add Office': 'Dodaj ured', 'Add or Update': 'Dodavanje ili ažuriranje', 'Add Order': 'Dodaj narudžbu', 'Add Organization': 'Dodaj organizaciju', 'Add Organization Domain': 'Dodaj domenu organizaciju', 'Add Organization Needs': 'Dodaj potrebe organizacije', 'Add Organization to Activity': 'Dodaj organizaciju u aktivnost', 'Add Organization to Project': 'Dodaj organizaciju projektu', 'Add Participant': 'Dodaj učesnika', 'Add Partner': 'Dodaj partnera', 'Add Peer': 'Dodaj saradnika', 'Add People to Commitment': 'Dodaj ljude u zaduženje', 'Add Person': 'Dodaj osobu', 'Add Person to Commitment': 'Dodaj osobu u zaduženje', "Add Person's Details": 'Dodaj detalje o osobi', 'Add Personal Effects': 'Dodaj vlastite efekte', 'Add Photo': 'Dodajte sliku', 'Add Point': 'Dodaj tačku', 'Add Polygon': 'Dodaj mnogougao', 'Add Population Statistic': 'Dodaj statistiku o stanovništvu', 'Add Position': 'Dodaj poziciju', 'Add Problem': 'Dodaj problem', 'Add Professional Experience': 'Dodaj profesionalno iskustvo', 'Add Profile Configuration for this Layer': 'Dodaj konfiguraciju profila za ovaj sloj', 'Add Project': 'Dodaj projekat', 'Add Projection': 'Dodajte projekciju', 'Add Projections': 'Dodaj projekciju', 'Add Question': 'Dodaj pitanje', 'Add Rapid Assessment': 'Dodaj Brzu Procjenu', 'Add Recipient': 'Dodaj primaoca', 'Add Recipient Site': 'Dodaj mjesto primaoca', 'Add Recipient Site.': 'Dodaj lokaciju primaoca', 'Add Record': 'Dodaj zapis', 'Add Recovery Report': 'Dodaj izvještaj o pronalaženju', 'Add Reference Document': 'Dodaj prateći/referentni dokument', 'Add Region': 'Dodaj područje', 'Add Relief Item': 'Dodaj stavku pomoći', 'Add Report': 'Dodaj izvještaj', 'Add Request': 'Dodaj zahtjev', 'Add Request Detail': 'Dodaj detalje o zahtjevu', 'Add Request Template': 'Dodaj predložak zahtjeva', 'Add Resource': 'Dodaj Resurs', 'Add Resource Type': 'Dodaj tip resursa', 'Add Response': 'Dodaj odgovor', 'Add Response Summary': 'Dodaj sumarni odgovor', 'Add River': 'Dodaj rijeku', 'Add Role': 'Dodaj ulogu', 'Add Room': 'Dodajte prostoriju', 'Add RSS Settings': 'Dodaj RSS Postavke', 'Add School District': 'Dodaj školski rejon', 'Add School Report': 'Dodaj školski izvještaj', 'Add Section': 'Dodaj sekciju', 'Add Sector': 'Dodaj sektor', 'Add Sender Site.': 'Dodaj lokaciju pošiljaoca.', 'Add Service': 'Dodaj usluga', 'Add Service Profile': 'Dodaj profil usluga', 'Add Setting': 'Dodaj Postavke', 'Add Shelter': 'Dodaj sklonište', 'Add Shelter Service': 'Dodaj uslugu skloništa', 'Add Shelter Type': 'Dodaj tip skloništa', 'Add Site Needs': 'Dodaj potrebe mjesta', 'Add Skill': 'Dodaj vještinu', 'Add Skill Equivalence': 'Dodaj ekvivalenciju vještine', 'Add Skill Provision': 'Dodaj pružanje vještina', 'Add Skill to Request': 'Dodati vještinu u zahtjev', 'Add Skill Type': 'Dodaj tip vještine', 'Add Skill Types': 'Dodaj tip vještine', 'Add Solution': 'Dodaj rješenje', 'Add Source': 'Dodaj izvor', 'Add Staff': 'Dodaj članove osoblja', 'Add Staff Member': 'Dodaj člana osoblja', 'Add staff members': 'Dodaj članove osoblja', 
'Add Staff Type': 'Dodaj tip osoblja', 'Add Status': 'Dodaj status', 'Add Stock to Warehouse': 'Dodaj zalihu u skladište', 'Add Storage Bin Type': 'Dodaj tip korpe za smještaj', 'Add Storage Location': 'Dodaj lokaciju skladištenja', 'Add strings manually': 'Dodaj stringove ručno', 'Add strings manually through a text file': 'Dodaj nizove znakova ručno kroz tekstualnu datoteku', 'Add Sub-Category': 'Traži potkategoriju', 'Add Subscription': 'Dodaj pretplatu', 'Add Subsector': 'Dodaj podsektor', 'Add Survey Answer': 'Dodaj odgovor na istraživanje', 'Add Survey Question': 'Dodajte anketno pitanje', 'Add Survey Section': 'Dodajte anketnu sekciju', 'Add Survey Series': 'Dodajte niz anketa', 'Add Survey Template': 'Dodaj predložak anketi', 'Add Symbology to Layer': 'Dodaj značenje simbola na sloj', 'Add Task': 'Dodaj zadatak', 'Add Team': 'Dodaj tim', 'Add Team Member': 'Dodaj člana tima', 'Add the Storage Bin Type.': 'Dodaj tip korpe za smještaj', 'Add the Storage Location where this bin is located.': 'Dodaj lokaciju gdje je ova korpa.', 'Add the Storage Location where this this Bin belongs to.': 'Dodaj lokaciju gdje ova korpa pripada.', 'Add Theme': 'Dodaj temu', 'Add this entry': 'Dodaj ovaj unos', 'Add Ticket': 'Dodaj karticu', 'Add to a Team': 'Dodaj u Tim', 'Add to Bin': 'Dodaj u korpu', 'Add to budget': 'Dodati budžetu', 'Add to Bundle': 'Dodaj u paket', 'Add to Catalog': 'Dodaj u katalog', 'Add to Feature Group': 'Dodaj u grupu karakteristika', 'Add Training': 'Dodaj trening', 'Add Translation Language': 'Dodaj jezik za prevođenje', 'Add Twilio Settings': 'Dodaj Twilio postavke', 'Add Twitter Search Query': 'Dodaj Twitter upit za pretragu', 'Add Unit': 'Dodaj Jedinicu', 'Add User': 'Dodaj korisnika', 'Add Vehicle': 'Dodaj vozilo', 'Add Vehicle Category': 'Dodaj kategoriju vozila', 'Add Vehicle Detail': 'Dodaj detalje o vozilu', 'Add Vehicle Details': 'Dodaj detalje o vozilu', 'Add Vehicle Type': 'Dodaj tip vozila', 'Add Volunteer': 'Dodajte volontera', 'Add Volunteer Availability': 'Dodaj dostupnost volontera', 'Add Volunteer Registration': 'Dodaj registraciju volontera', 'Add volunteers': 'Dodaj volontere', 'Add Warehouse': 'Dodaj skladište', 'Add Warehouse Item': 'Dodaj stavku skladišta', 'Add...': 'Dodaj...', 'Add/Edit/Remove Layers': 'Dodaj/Uredi/Obriši slojeve', 'added': 'dodano', 'Added to Group': 'Dodano u grupu', 'Added to Team': 'Dodano u tim', 'Additional Beds / 24hrs': 'Dodatni kreveti / 24 sata', 'Additional Comments': 'Dodatni komentari', 'Additional quantity quantifier – i.e. “4x5”.': 'Dodatni kvantifikator količine, tj. “4x5”.', 'Address': 'Adresa', 'Address added': 'Dodana adresa', 'Address deleted': 'Obrisana adresa', 'Address Details': 'Detalji adrese', 'Address Found': 'Pronađena adresa', 'Address Mapped': 'Adresa mapirana', 'Address NOT Found': 'Nije pronađena adresa', 'Address NOT Mapped': 'Adresa NIJE mapirana', "Address of an image to use for this Layer in the Legend. This allows use of a controlled static image rather than querying the server automatically for what it provides (which won't work through GeoWebCache anyway).": 'Adresa slike koja će se koristiti za ovaj sloj u legendi. 
Ovo će omogućiti upotrebu kontrolisane statičke slike umjesto automatskog upita servera za ono što on pruža (što neće raditi s GeoWebCache)', 'Address Type': 'Tip adrese', 'Address updated': 'Ažurirana adresa', 'Addresses': 'Adrese', 'Adequate': 'Odgovarajuće', 'Adequate food and water available': 'Dostupna adekvatna hrana i voda', 'Adjust Item Quantity': 'Prilagodi količinu stavke', 'Adjust Items due to Theft/Loss': 'Prilagodi stavke zbog krađe/gubitka', 'Adjust Stock': 'Prilagodi zalihu', 'Adjust Stock Item': 'Prilagodi stavku zalihe', 'Adjust Stock Levels': 'Prilagodi nivo zalihe', 'Adjustment created': 'Prilagođenje kreirano', 'Adjustment deleted': 'Prilagođenje obrisano', 'Adjustment modified': 'Prilagođenje izmijenjeno', 'Admin Email': 'Email administratora', 'Admin Name': 'Ime administratora', 'Admin Tel': 'Telefon administratora', 'Administration': 'Administracija', 'Admissions/24hrs': 'Ulazi/24 sata', 'Adolescent (12-20)': 'Adolescent (12-20)', 'Adolescent participating in coping activities': 'Učestvovanje adolescenata u aktivnostima prilagođavanja', 'Adult (21-50)': 'Odrasli (21-50)', 'Adult female': 'Odrasla ženska osoba', 'Adult ICU': 'Intenzivna njega za odrasle', 'Adult male': 'Odrasli muškarac', 'Adult Psychiatric': 'Psihijatar za odrasle', 'Adults in prisons': 'Odrasli u zatvoru', 'advanced': 'napredno', 'Advanced Bin Search': 'Napredna pretraga korpi', 'Advanced Catalog Search': 'Napredna pretraga kataloga', 'Advanced Category Search': 'Napredna pretraga kategorija', 'Advanced Item Search': 'Napredna pretraga stavki', 'Advanced Sub-Category Search': 'Napredna pretraga podkategorije', 'Advanced Unit Search': 'Napredna pretraga jedinica', 'Advanced:': 'Napredno:', 'Advisory': 'Savjeti', 'Advocacy': 'Advokatura', 'Affected Persons': 'Osobe na koje je bio utjecaj', 'Affiliation added': 'Preduzeće dodano', 'Affiliation deleted': 'Preduzeće obrisano', 'Affiliation Details': 'Detalji preduzeća', 'Affiliation updated': 'Preduzeće ažurirano', 'Affiliations': 'Preduzeća', 'Afghanistan': 'Afganistan', 'After clicking on the button, a set of paired items will be shown one by one. Please select the one solution from each pair that you prefer over the other.': 'Nakon što kliknete na dugme, pojavit će se niz stavki u paru jedan za drugim. Molimo da odaberete jedno rješenje iz svakog para koje preferirate.', 'After clicking on the Vote button ... (#TODO [String]) Please select the one item from each pair that you prefer over the other.': 'Nakon što kliknete na dugme glasaj, (#TODO [String]). Molimo da odaberete jednu stavku iz svakog para koju preferirate.',
'Age': 'Starost', 'Age group': 'Starosna grupa', 'Age Group': 'Starosna grupa', 'Age group does not match actual age.': 'Starosna grupa ne odgovara stvarnim godinama.', 'Aggravating factors': 'Otežavajući faktori', 'Agriculture': 'Poljoprivreda', 'Aid Management': 'Upravljanje pomoći', 'Aid Request': 'Zahtjev za pomoć', 'Aid Request added': 'Zahtjev za pomoć dodan', 'Aid Request Details': 'Detalji o zahtjevu za pomoć', 'Aid Request updated': 'Zahtjev za pomoć je ažuriran', 'Aid Requests': 'Zahtjevi za pomoć', 'Air Transport Service': 'Usluga zračnog prijevoznog sredstva', 'Aircraft Crash': 'Pad aviona', 'Aircraft Hijacking': 'Avionska otmica', 'Aircraft Maximum Size': 'Maksimalna veličina aviona', 'Airport': 'Aerodrom', 'Airport added': 'Aerodrom dodan', 'Airport Closure': 'Zatvaranje aerodroma', 'Airport deleted': 'Aerodrom obrisan', 'Airport Details': 'Detalji aerodroma', 'Airport updated': 'Aerodrom ažuriran', 'Airports': 'Aerodromi', 'Airspace Closure': 'Zatvaranje zračnog prostora', 'Albania': 'Albanija', 'Alcohol': 'Alkohol', 'Alcoholics': 'Alkoholičari', 'Alert': 'Uzbuna', 'Alimentary Support Vehicle': 'Vozila za hitnu podršku', 'All': 'Sve', 'ALL': 'Sve', 'All data provided by the Sahana Software Foundation from this site is licenced under a Creative Commons Attribution licence. However, not all data originates here. Please consult the source field of each entry.': 'Svi podaci obezbjeđeni od strane Sahana Software fondacije sa ove stranice su licencirani pod Creative Commons Attribution licencom. Međutim, svi podaci ne potiču odavde. Molimo, pregledajte polje izvora svakog pristupa.', 'All data provided by the Sahana Software Foundation from this site is licensed under a Creative Commons Attribution license. However, not all data originates here. Please consult the source field of each entry.': 'Svi podaci obezbjeđeni od strane Sahana Software fondacije sa ove stranice su licencirani pod Creative Commons Attribution licencom. Međutim, svi podaci ne potiču odavde. 
Molimo, pregledajte polje izvora svakog pristupa.', 'All Entities': 'Sve jedinke', 'All Inbound & Outbound Messages are stored here': 'Sve ulazne i izlazne poruke su smještene ovdje', 'All Open Tasks': 'Svi otvoreni zadaci', 'All Pledges': 'Svi zahtjevi', 'All Records': 'Svi zapisi', 'all records': 'svi zapisi', 'All Requested Items': 'Sve zahtijevani stavke', 'All Resources': 'Svi Resursi', 'All selected': 'Sve izabrano', 'All Tasks': 'Svi zadaci', 'Allowed to push': 'Dozvoljeno gurnuti', 'allows a budget to be developed based on staff & equipment costs, including any admin overheads.': 'dozvoljava da se budžet uspostavi na osnovu troškova osoblja i opreme, uključujući bilo koje režijske troškove administratora.', 'Allows a Budget to be drawn up': 'Dozvoljava izradu budžeta', 'Allows authorized users to control which layers are available to the situation map.': 'Omogućava ovlaštenim korisnicima da kontrolišu koji slojevi su dostupni na karti situacije.', 'Allows authorized users to upload multiple features into the situation map.': 'Omogućava ovlaštenim korisnicima da pošalju više karakteristima na kartu situacije.', 'allows for creation and management of assessments.': 'dozvoljava kreiranje i upravljanje procjenama.', 'allows for creation and management of surveys to assess the damage following a natural disaster.': 'dozvoljava kreiranje i upravljanje istraživanjima za procjenu nesreće uzrokovane prirodnom katastrofom', 'Already in this Feature Group!': 'Već je u ovoj grupi karakteristika', 'Alternative infant nutrition in use': 'Alternativa u prehrani djece', 'Alternative Item': 'Alternativna stavka', 'Alternative Item added': 'Alternativna stavka dodana', 'Alternative Item deleted': 'Alternativna stavka obrisana', 'Alternative Item Details': 'Detalji alternativne stavke', 'Alternative Item updated': 'Alternativna stavka ažurirana', 'Alternative Items': 'Alternativne stavke', 'Alternative places for studying': 'Alternativna mjesta za učenje', 'Alternative places for studying available': 'Dostupna alternativna mjesta za studiranje', 'always update': 'uvijek ažuriraj', 'Ambulance Service': 'Usluge u ambulanti', 'Amount': 'Iznos', 'Amount of the Project Budget spent at this location': 'Iznos budžeta projekta potrošen na ovoj lokaciji', 'An Assessment Template can be selected to create a Disaster Assessment. Within a Disaster Assessment, responses can be collected and results can analyzed as tables, charts and maps': 'Predložak procjene se može izabrati za kreiranje procjene katastrofe. 
Unutar procjene katastrofe, odgovori se mogu sakupiti a rezultati analizirani kao tabele, dijagrami i mape.', 'An error occured, please %(reload)s the page.': 'Desila se greška, molim %(reload)s stranicu.', 'An ESRI Shapefile (zipped)': 'ESRI indeks datoteka s likovima (kompresovana zip)', 'an individual/team to do in 1-2 days': 'pojedinac-tim da to uradi u 1 do 2 dana', 'An intake system, a warehouse management system, commodity tracking, supply chain management, procurement and other asset and resource management capabilities.': 'Usisni sistem, sistem upravljanja skladšitem, praćenje robe, upravljanje lancem nabavke, nabavka i ostala sredstva, te sposobnosti upravljanja resursima.', 'An interactive map the situation.': 'Interaktivna mapa situacije', 'An Item Category must have a Code OR a Name.': 'Kategorija sredstva mora imati šifru ili ime.', 'An item which can be used in place of another item': 'Stavka koja se može koristiti umjesto druge stavke', 'Analysis of assessments': 'Analiza procjena', 'Analysis of Completed Surveys': 'Analiza kompletiranih anketa', 'Analyze with KeyGraph': 'Analiza pomoću KeyGraph', 'Anamnesis': 'Anamneza', 'and': 'i', 'Andorra': 'Andora', 'angular': 'uglaono', 'Animal Die Off': 'Izumiranje životinja', 'Animal Feed': 'Hrana za životinje', 'Animals': 'Životinje', 'Annual Budget': 'Godišnji budžet', 'Annual Budget deleted': 'Obrisan godišnji budžet', 'Annual Budget updated': 'Ažuriran godišnji budžet', 'Annual Budgets': 'Godišnji budžeti', 'Anonymous': 'Anoniman', 'anonymous user': 'anonimni korisnik', 'Answer Choices (One Per Line)': 'Izbor odgovora (Jedan po liniji)', 'Anthropology': 'Antropologija', 'Antibiotics available': 'Dostupni antibiotici', 'Antibiotics needed per 24h': 'Antibiotici potrebni u 24 sata', 'Antigua and Barbuda': 'Antigua i Barbuda', 'Any': 'Bilo koji', 'ANY': 'BILO KOJE', 'Any comments about this sync partner.': 'Neki komentari o sinhronizacijskom partneru', 'API is documented here': 'Aplikacijski programerski interfejs (API) je ovdje dokumentiran', 'API Key': 'API ključ', 'Apparent Age': 'Prividne godine', 'Apparent Gender': 'Vidljiv spol', 'Appearance': 'Izgled', 'Applicable to projects in Pacific countries only': 'Primjenjivo samo na projekte u pacifičkim zemljama', 'Application': 'Aplikacija', 'Application Deadline': 'Zadnji rok za prijavu', 'Application Permissions': 'Aplikacijske dozvole', 'Apply': 'Primijeni', 'Appraisal added': 'Ispunjenje dodano', 'Appraisal deleted': 'Ispunjenje obrisano', 'Appraisal Details': 'Detalji ispunjenja', 'Appraisal updated': 'Ispunjenje ažurirano', 'Appraisals': 'Ispunjenja', 'Appropriate clothing available': 'Odgovarajuća odjeća dostupna', 'Appropriate cooking equipment/materials in HH': 'Odgovarajuća oprema za kuhanje/materijali u domaćinstvu', 'Approve': 'Odobri', 'approved': 'Odobreno', 'Approved': 'Odobreno', 'Approved By': 'Potvrdio', 'Approver': 'Onaj koji odobrava', 'Approx. 
number of cases/48h': 'Približan broj slučaja u 48 h', 'Approximately how many children under 5 with diarrhea in the past 48 hours?': 'Približno koliko djece s dijarejom ispod 5 godina u zadnjih 48 sati?', 'Arabic': 'Arapski', 'ArcGIS REST Layer': 'ArcGIS REST sloj', 'Archive not Delete': 'Arhiva ne briši', 'Arctic Outflow': 'artički odljev', 'Are breast milk substitutes being used here since the disaster?': 'Da li se koriste zamjene za majčino mlijeko nakon katastrofe?', 'are mandatory and must be filled': 'su obavezna polja i moraju biti popunjena', 'Are there adults living in prisons in this area?': 'Ima li odrazlih u zatvoru u ovom području?', 'Are there alternative places for studying?': 'Postoje li alternativna mjesta za studiranje?', 'Are there cases of diarrhea among children under the age of 5?': 'Ima li slučajeva dijareje među djecom ispod 5 godina?', 'Are there children living in adult prisons in this area?': 'Ima li djece u zatvoru za odrasle u ovom području?', 'Are there children living in boarding schools in this area?': 'Ima li djece u internatima u ovom području?', 'Are there children living in homes for disabled children in this area?': 'Ima li djece u kućama za djecu s invaliditetom u ovom području?', 'Are there children living in juvenile detention in this area?': 'Ima li djece u pritvoru za maloljetnike?', 'Are there children living in orphanages in this area?': 'Ima li djece u domovima za napuštenu djecu u ovom području?', 'Are there older people living in care homes in this area?': 'Ima li starijih ljudi u domovima u ovom području?', 'Are there separate latrines for women and men available?': 'Da li su dostupni odvojeni zahodi za žene i muškarce?', 'Are you sure you want to commit to this request and send a shipment?': 'Da li ste sigurni da želite potvrditi ovaj zahtjev i poslati pošiljku', 'Are you sure you want to delete this record?': 'Jeste li sigurni da želite obrisati ovaj zapis?', 'Are you sure you want to send this shipment?': 'Jeste li sigurni da želite poslati ovu pošiljku?', 'Are you susbscribed?': 'Jeste li pretplaćeni?', 'Area': 'Površina', 'Areas inspected': 'Istražena područja', 'Arguments': 'Argumenti', 'Armenia': 'Armenija', 'Arrived': 'Pristiglo', 'artificial': 'vještački', 'Artificial eye left': 'Vještačko lijevo oko', 'As of yet, no sections have been added to this template.': 'Do sada nisu nove sekcije dodate u šablon', 'Assessment': 'Procjena', 'Assessment added': 'Procjena dodana', 'Assessment admin level': 'Nivo administratora procjene', 'Assessment Answer added': 'Dodan odgovor ocjene', 'Assessment Answer deleted': 'Obrisan odgovor ocjene', 'Assessment Answer Details': 'Detalji odgovora ocjene', 'Assessment Answer updated': 'Ažuriran odgovor ocjene', 'Assessment Answers': 'Odgovori ocjene', 'Assessment deleted': 'Procjena je izbrisana', 'Assessment Details': 'Detalji procjene', 'Assessment Question added': 'Pitanja ocjene dodana', 'Assessment Question deleted': 'Pitanja ocjene obrisana', 'Assessment Question Details': 'Detalji pitanja ocjene', 'Assessment Question updated': 'Pitanje ocjene ažurirano', 'Assessment Questions': 'Pitanja ocjene', 'Assessment Reported': 'Procjena izvještena', 'Assessment Summaries': 'Kratka procjena', 'Assessment Summary added': 'Dodat rezime procjene', 'Assessment Summary deleted': 'Izbrisan je rezime procjena', 'Assessment Summary Details': 'Detalji sažetka procjene', 'Assessment Summary updated': 'Sažetak procjene ažuriran', 'Assessment Template added': 'Dodan predložak ocjene', 'Assessment Template deleted': 
'Obrisan predložak ocjene', 'Assessment Template Details': 'Detalji predloška ocjene', 'Assessment Template updated': 'Ažuriran predložak ocjene', 'Assessment Templates': 'Predlošci ocjene', 'Assessment timeline': 'Procjena vremenskog roka', 'Assessment Type:': 'Vrsta procjene:', 'Assessment updated': 'Ažurirana procjena', 'Assessments': 'Procjene', 'Assessments and Activities': 'Dodjele i aktivnosti', 'Assessments are structured reports done by Professional Organizations': 'Procjene su struktuirani izvještaji koje obavljaju profesionalne organizacije', 'Assessments Needs vs. Activities': 'Procjena potreba u usporedbi s aktivnostima', 'Assessments:': 'Procjene:', 'Assessor': 'Procjenitelj', 'Asset': 'Sredstvo', 'Asset added': 'Dodano sredstvo', 'Asset Assignments': 'Dodjela sredstava', 'Asset Assignments deleted': 'Dodjela sredstava je izbrisana', 'Asset deleted': 'Obrisano sredstvo', 'Asset Details': 'Detalji sredstva', 'Asset Item': 'Stavka sredstava', 'Asset Log': 'Zapisnik sredstava', 'Asset Log Details': 'Detalji zapisnika imovine i sredstava', 'Asset Log Empty': 'Zapisnik sredstava je prazan', 'Asset Log Entry Added - Change Label': 'Stavka zapisnika o sredstvu dodana - Promijenite naziv', 'Asset Log Entry deleted': 'Unos sredstva u zapisnik je obrisan', 'Asset Log Entry updated': 'Unos je ažuriran', 'Asset Management': 'Upravljanje sredstvima', 'Asset Number': 'Broj sredstva', 'Asset removed': 'Sredstvo uklonjeno', 'Asset updated': 'Ažurirano sredstvo', 'Assets': 'Materijalno-tehnička Sredstva', 'Assets are resources which are not consumable but are expected back, so they need tracking.': 'Materijalno-tehnička sredstva su resursi koji nisu potrošna roba i očekuje se njihov povrat, stoga je neophodan nadzor.', 'Assign': 'Dodjeli', 'Assign %(staff)s': 'Dodijeli %(staff)s', 'Assign another Role': 'Dodijeli drugu ulogu', 'Assign Asset': 'Dodijeli sredstvo', 'Assign Facility': 'Dodijeli objekat', 'Assign Group': 'Dodijeli grupu', 'Assign Human Resource': 'Dodijeli ljudske resurse', 'Assign Role to a User': 'Dodijeli ulogu korisniku', 'Assign Roles': 'Dodijeli uloge', 'Assign Staff': 'Dodjeli Osoblje', 'Assign Storage Location': 'Dodijeli lokaciju skladišta', 'Assign to Facility/Site': 'Dodijeli objektu/mjestu', 'Assign to Org.': 'Dodijeliti organizaciji', 'Assign to Organisation': 'Dodijeli organizaciji', 'Assign to Organization': 'Dodijeli organizaciji', 'Assign to Person': 'Dodijeli osobi', 'Assign to Site': 'Dodijeli mjestu', 'Assign Vehicle': 'Dodijeli vozila', 'assigned': 'dodijeljen', 'Assigned': 'Dodijeljeno', 'Assigned By': 'Dodijeljen od strane', 'Assigned Human Resources': 'Dodijeljeni ljudski resursi', 'Assigned Roles': 'Dodijeljene uloge', 'Assigned To': 'Dodjeljen', 'Assigned to': 'Dodijeljen', 'Assigned to Facility/Site': 'Dodijeljeni objeku/mjestu', 'Assigned to Organisation': 'dodijeljen organizaciji', 'Assigned to Organization': 'Dodijeljeno organizaciji', 'Assigned to Person': 'Dodijeljeno Osobi', 'Assigned to Site': 'Pridružen mjestu', 'Assignments': 'Zadaci', 'Assistant': 'Asistent', 'Assisted Family Care': 'Pomoć u porodičnoj njezi', 'Assisted Self-care': 'Pomoć u samonjezi', 'Association': 'Savez', 'At or below %s': 'Na ili ispod %s', 'At/Visited Location (not virtual)': 'Na/posjećena lokacija (ne virtuelno)', 'ATC-20 Rapid Evaluation modified for New Zealand': 'ATC-20 brza procjena modifikovana za New Zealand', 'Attachments': 'Dodaci', 'Attend to information sources as described in <instruction>': 'Pobrinuti se za izvore informacija kao što je opisano u polju 
<instruction>', 'Attributes': 'Atributi', 'Attribution': 'Pripisivanje', 'Audit Read': 'Prati čitanje', 'Australia': 'Australija', 'Austria': 'Austrija', 'AUTH TOKEN': 'AUTH TOKEN', "Authenticate system's Twitter account": 'Potvrdite Twitter račun sistema', 'Authentication Required': 'Potrebna provjera autentičnosti', 'Author': 'Autor', 'Auto start': 'Samopokretanje', 'Automatic Database Synchronization History': 'Historijat automatske sinhronizacije sa bazom podataka', 'Automotive': 'Samohodni', 'Availability': 'Dostupnost', 'Available': 'Raspoloživo', 'Available Alternative Inventories': 'Dostupne alternativne zalihe', 'Available Beds': 'Dostupni kreveti', 'Available databases and tables': 'Dostupne baze podataka i tabele', 'Available Databases and Tables': 'Dostupne baze podataka i tabele', 'Available for Location': 'Dostupno za lokaciju', 'Available Forms': 'Dostupne forme', 'Available from': 'Na raspolaganju od', 'Available in Viewer?': 'Je li dostupno u pregledniku?', 'Available Inventories': 'Dostupne zalihe', 'Available Messages': 'Dostupne Poruke', 'Available Records': 'Dostupni zapisi', 'Available Recovery Reports': 'Dostupni izvještaji o nađenim tijelima', 'Available until': 'Dostupno do', 'Avalanche': 'Lavina', 'average': 'prosjek', 'Average': 'Prosjek', 'Avoid the subject event as per the <instruction>': 'Izbjegni predmet događanja kao po <instruction>', 'Award': 'Nagrada', 'Award added': 'Nagrada dodana', 'Award deleted': 'Nagrada obrisana', 'Award updated': 'Nagrada ažurirana', 'Awards': 'Nagrade', 'Awareness raising': 'Podizanje obaviještenosti', 'Azerbaijan': 'Azerbejdžan', 'Babies who are not being breastfed, what are they being fed on?': 'Bebe koje nisu dojene, na koji su način hranjene?', 'Baby And Child Care': 'Bebe i briga za djecu', 'Back to Roles List': 'Nazad na listu uloga', 'Back to Top': 'Nazad na vrh', 'Back to Users List': 'Nazad na listu korisnika', 'Background Color': 'Pozadinska boja', 'Background Colour': 'Boja pozadine', 'Background Colour for Text blocks': 'Boja pozadine za tekstualne blokove', 'Bahai': 'Bahai', 'Bahamas': 'Bahami', 'Bahrain': 'Bahrein', 'Baldness': 'ćelavost', 'Banana': 'Banana', 'Bangladesh': 'Bangladeš', 'Bank/micro finance': 'Banka/mikrokreditna organizacija', 'Barge Capacity': 'Kapacitet skele', 'Barricades are needed': 'Potrebne su barikade', 'Base %(facility)s Set': 'Baza %(facility)s postavljena', 'Base Facility/Site Set': 'Postavljeno mjesto/objekt baze', 'Base Layer?': 'Osnovni sloj?', 'Base Layers': 'Osnovni slojevi', 'Base Location': 'Osnovna lokacija', 'Base Location Updated!': 'Lokacija baze ažurirana!', 'Base Site Set': 'Postavljeno mjesto baze', 'Base Station added': 'Sodana bazna stanica', 'Base Station deleted': 'Obrisana bazna stanica', 'Base Station Details': 'Detalji bazne stanice', 'Base Station updated': 'Ažurirana bazna stanica', 'Base Stations': 'Bazne stanice', 'Base Unit': 'Bazna jedinica', 'Base URL of the remote Sahana Eden instance including application path, e.g. http://www.example.org/eden': 'Bazni URL udaljene Sahana Eden instance uključujući stazu aplikacije, npr. 
http://www.example.org/eden', 'Baseline added': 'referentna tačka dodana', 'Baseline Data': 'Referentni podaci', 'Baseline deleted': 'Referentna tačka je izbrisana', 'Baseline Number of Beds': 'Bazni broj kreveta', 'Baseline number of beds of that type in this unit.': 'referentni broj kreveta tog tipa u ovoj jedinici', 'Baseline Type': 'Vrsta referentne tačke', 'Baseline Type added': 'Tip referentne tačke dodan', 'Baseline Type deleted': 'Izbrisan tip referentne tačke', 'Baseline Type Details': 'Detalji tipa referentne tačke', 'Baseline Type updated': 'Tip referentne tačke je ažuriran', 'Baseline Types': 'Tipovi referentnih tačaka', 'Baseline updated': 'Izmijenjena referentna tačka', 'Baselines': 'Referentne tačke', 'Baselines Details': 'Detalji referentne tačke', 'Basic Assessment': 'Osnovna procjena', 'Basic Assessment Reported': 'Osnovna procjena prijavljena', 'Basic Details': 'Osnovni detalji', 'Basic information on the requests and donations, such as category, the units, contact details and the status.': 'Osnovne informacije o zahtjevima i donacijama, kao što su kategorija, jedinice, kontaktni detalji i status.', 'Basic medical supplies available prior to disaster': 'Osnovna medicinska podrška dostupna prije katastrofe', 'Basic medical supplies available since disaster': 'Osnovna medicinska podrška dostupna nakon katastrofe', 'Basic reports on the Shelter and drill-down by region': 'Osnovni izvještaji o skloništu i dublja analiza po regijama', 'Baud': 'Baud', 'Baud rate to use for your modem - The default is safe for most cases': 'Baud rate korišten za Vaš modem - Zadano je sigurno za većinu slučajeva', 'BDRT (Branch disaster response teams)': 'BDRT (Odgovorni timovi ogranka u slučaju katastrofe)', 'Beam': 'Zraka', 'Bed Capacity': 'Krevetni kapaciteti', 'Bed Capacity per Unit': 'Kapacitet kreveta po jedinici', 'Bed Type': 'Tip ležaja', 'Bed type already registered': 'Tip kreveta već registriran', 'Bedding materials available': 'Dostupni materijali posteljine', 'beginning': 'početak', 'Belarus': 'Bjelorusija', 'Belgium': 'Belgija', 'belongs to': 'pripada u', 'Below ground level': 'Ispod nivoa tla', 'Beneficiaries': 'Korisnici', 'Beneficiaries Added': 'Korisnici dodani', 'Beneficiaries Deleted': 'Korisnici izbrisani', 'Beneficiaries Details': 'Detalji korisnika', 'Beneficiaries Updated': 'Korisnici ažurirani', 'Beneficiary': 'Korisnik', 'Beneficiary Report': 'Izvještaj o korisnicima', 'Beneficiary Type': 'Tip korisnika', 'Beneficiary Type Added': 'Dodan tip korisnika', 'Beneficiary Type Deleted': 'Izbrisan tip korisnika', 'Beneficiary Type Updated': 'Ažuriran tip korisnika', 'Beneficiary Types': 'Tipovi korisnika', 'Bhuddist': 'Bhudist', 'Bhutan': 'Butan', 'Big Capacity Tank Vehicle': 'Vozilo rezervoar velikog kapaciteta', 'Bilateral': 'Dvostrana', 'Bin': 'Korpa', 'Bing Layer': 'Bing sloj', "Bing Layers cannot be displayed if there isn't a valid API Key": 'Bing slojevi ne mogu biti prikazani ako nije ispravan API ključ', 'Biological Hazard': 'Biološke opasnosti', 'Biscuits': 'Keks', 'black': 'crna', 'Blizzard': 'Mećava', 'Blocked': 'Blokirano', 'blond': 'plavokosa', 'Blood Type (AB0)': 'Krvna grupa (AB0)', 'Blowing Snow': 'Mećava', 'blue': 'plavo', 'Boat': 'Čamac', 'Bodies': 'Tijela', 'Bodies found': 'Pronađena tijela', 'Bodies recovered': 'Pronađena tijela', 'Bodily Constitution': 'Tjelesna konstitucija', 'Body': 'Tijelo', 'Body Finds': 'Nađena tijela', 'Body Hair': 'Dlake po tijelu', 'Body hair, Colour': 'Dlake po tijelu, boja', 'Body hair, Extent': 'Dlake po tijelu, dužina',
'Body Recovery': 'Izvlačenje tijela', 'Body Recovery Request': 'Zahtjev za izvlačenje tijela', 'Body Recovery Requests': 'Zahtjevi za izvlačenje tijela', 'Bolivia': 'Bolivija', 'Bomb': 'Bomba', 'Bomb Explosion': 'Eksplozija bombe', 'Bomb Threat': 'Prijetnja bombom', 'Border Colour for Text blocks': 'Boja rubova tekstualnih polja', 'Bosnia and Herzegovina': 'Bosna i Hercegovina', 'Both': 'Oboje', 'Botswana': 'Bocvana', 'Bounding Box Insets': 'Nacrti okvirne kutije', 'Bounding Box Size': 'Veličina ambalažne kutije', 'box': 'kutija', 'Boys 13-18 yrs in affected area': 'Dječaci 13-18 god u pogođenom području', 'Boys 13-18 yrs not attending school': 'Dječaci 13-18 godina koji ne pohađaju školu', 'Boys 6-12 yrs in affected area': 'Dječaci 6-12 godina u zahvaćenim područjima', 'Boys 6-12 yrs not attending school': 'Dječaci 6-12 godina koji ne pohađaju školu', 'Branch': 'Ogranak', 'Branch Coordinator': 'Koordinator ogranka', 'Branch Organization added': 'Dodan ogranak organizacije', 'Branch Organization deleted': 'Obrisan ogranak organizacije', 'Branch Organization Details': 'Detalji ogranka organizacije', 'Branch Organization updated': 'Ažuriran ogranak organizacije', 'Branch Organizations': 'Ogranci organizacije', 'Branches': 'Ogranci', 'Brand': 'Marka', 'Brand added': 'Marka dodana', 'Brand deleted': 'Marka obrisana', 'Brand Details': 'Detalji marke', 'Brand updated': 'Marka ažurirana', 'Brands': 'Marke', 'Breakdown': 'Prekid', 'Breast milk substitutes in use since disaster': 'Zamjene za majčino mlijeko korištene nakon katastrofe', 'Breast milk substitutes used prior to disaster': 'Korištene zamjene za majčino mlijeko prije katastrofe', 'Bricks': 'Cigle', 'Bridge Closed': 'Most zatvoren', 'broad': 'široko', 'brown': 'smeđa', 'Brunei': 'Brunej', 'Bucket': 'Kanta', 'Buddhist': 'Budist', 'Budget': 'Budžet', 'Budget added': 'Dodat budžet', 'Budget deleted': 'Budžet obrisan', 'Budget Details': 'Detalji budžeta', 'Budget Updated': 'Budžet ažuriran', 'Budget updated': 'Budžet ažuriran', 'Budgeting Module': 'Modul za budžetiranje', 'Budgets': 'Budžeti', 'Buffer': 'Spremnik', 'Bug': 'Buba', 'Building Assessments': 'Procjene građevina', 'Building Collapsed': 'Zgrada srušena', 'Building Name': 'Ime zgrade', 'Building or storey leaning': 'Zgrada ili sprat su nagnuti', 'Building Safety Assessments': 'Procjena sigurnosti objekta', 'Building Short Name/Business Name': 'Ime zgrade/biznisa', 'Built using the Template agreed by a group of NGOs working together as the': 'Izgrađeno koristeći šablon kreiran od strane grupe NVO radeći zajedno kao', 'Bulgaria': 'Bugarska', 'Bulk Uploader': 'Masovni prenos', 'Bundle': 'Paket', 'Bundle added': 'Paket dodan', 'Bundle Contents': 'Sadržaji paketa', 'Bundle deleted': 'Paket obrisan', 'Bundle Details': 'Detalji paketa', 'Bundle Updated': 'Paket ažuriran', 'Bundle updated': 'Paket je ažuriran', 'Bundles': 'Svežnji', 'Bunion': 'Kriv nožni palac', 'Burn': 'Opekotina', 'Burn ICU': 'Intenzivna njega za opekotine', 'Burned/charred': 'Spaljeno/ugljenisano', 'Business damaged': 'Oštećenje industrije', 'Button name': 'Ime dugmeta', 'by': 'od strane', 'by %(person)s': 'od %(person)s', 'By %(site)s': 'Po %(site)s', 'By Facility': 'Po objektu', 'By Inventory': 'Po skladištu', 'By selecting this you agree that we may contact you.': 'Izborom ovoga slažete se da vas možemo kontaktirati.', 'By Site': 'Po mjestu', 'By Warehouse': 'Po skladištu', 'c/o Name': 'c/o Ime', 'Cache': 'Keš', 'Cache Keys': 'Ključevi keša', 'Calculate': 'Izračunaj', 'Calendar': 'Kalendar', 'Cambodia': 'Kampučija', 'Cameroon': 'Kamerun',
'Camp': 'Kamp', 'Camp added': 'Dodan kamp', 'Camp Coordination/Management': 'Koordinacija kampa/Menadžment', 'Camp deleted': 'Obrisan kamp', 'Camp Details': 'Detalji o kampu', 'Camp Service': 'Usluga kampa', 'Camp Service added': 'Dodana je usluga kampa', 'Camp Service deleted': 'Obrisana je usluga kampa', 'Camp Service Details': 'Detalji o uslugama kampa', 'Camp Service updated': 'Ažurirana je usluga kampa', 'Camp Services': 'Usluge kampa', 'Camp Status': 'Status kampa', 'Camp Status added': 'Dodan je status kampa', 'Camp Status deleted': 'Obrisan je status kampa', 'Camp Status Details': 'Detalji statusa kampa', 'Camp Status updated': 'Ažuriran je status kampa', 'Camp Statuses': 'Statusi kampa', 'Camp Type': 'Tip kampa', 'Camp Type added': 'Tip kampa dodan', 'Camp Type deleted': 'Tip kampa obrisan', 'Camp Type Details': 'Detalji tipa kampa', 'Camp Type updated': 'Tip kampa ažuriran', 'Camp Types': 'Vrste kampa', 'Camp Types and Services': 'Tipovi i usluge kampova', 'Camp updated': 'Ažuriran kamp', 'Campaign': 'Kampanja', 'Campaign Added': 'Kampanja dodana', 'Campaign Deleted': 'Kampanja izbrisana', 'Campaign ID': 'ID kampanje', 'Campaign Message': 'Poruka kampanje', 'Campaign Message Added': 'Dodana poruka kampanje', 'Campaign Message Deleted': 'Obrisana poruka kampanje', 'Campaign Message Updated': 'Ažurirana poruka kampanje', 'Campaign Messages': 'Poruke kampanje', 'Campaign Updated': 'Kampanja ažurirana', 'Campaigns': 'Kampanje', 'Camps': 'Kampovi', 'Can be grouped together into Feature Groups': 'Mogu se grupisati u grupe karakteristika', 'can be used to extract data from spreadsheets and put them into database tables.': 'može se koristiti za izvlačenje podataka iz tabelarnog prikaza i stavljanje istih u tabele baza podataka.', 'Can only approve 1 record at a time!': 'Moguće je potvrditi samo jedan zapis istovremeno!', 'Can only disable 1 record at a time!': 'Moguće je onemogućiti samo jedan zapis istovremeno!', 'Can only enable 1 record at a time!': 'Omogućen je samo jedan zapis istovremeno!', 'Can only update 1 record at a time!': 'Moguće je ažurirati samo jedan zapis istovremeno!', 'Can read PoIs either from an OpenStreetMap file (.osm) or mirror.': 'Može čitati tačke interesa iz OpenStreetMap datoteke (.osm) ili rezervnog servera.', "Can't import tweepy": 'Nemoguće unijeti tweepy', 'Canada': 'Kanada', 'Cancel': 'Otkaži', 'Cancel Crop': 'Otkaži rezanje', 'Cancel editing': 'Otkaži uređivanje', 'Cancel Log Entry': 'Otkaži stavku zapisnika', 'Cancel Shipment': 'Otkazati pošiljku', 'Canceled': 'Otkazano', 'Candidate Matches for Body %(label)s': 'Kandidat odgovara tijelu %(label)s', 'Candidate Matches for Body %s': 'Kandidat odgovara tijelu %s', 'Canned Fish': 'Konzervirana riba', 'cannot be deleted.': 'ne može se obrisati.', 'Cannot be empty': 'Ne može biti prazno', 'Cannot disable your own account!': 'Ne možete onesposobiti svoj račun!', 'Cannot make an Organization a branch of itself!': 'Ne može se napraviti organizacija koja je vlastiti ogranak!', 'Cannot open created OSM file!': 'Ne mogu otvoriti kreiranu OSM datoteku!', 'Cannot read from file: %(filename)s': 'Ne mogu pročitati iz datoteke: %(filename)s', 'Cannot send messages if Messaging module disabled': 'Ne mogu se slati poruke ako je modul za poruke isključen', 'Capacity (Day / Evacuation)': 'Kapacitet (Dan / Evakuacija)', 'Capacity (Day and Night)': 'Kapacitet (dan i noć)', 'Capacity (Day)': 'Kapacitet (dan)', 'Capacity (Max Persons)': 'Kapacitet (maksimalan broj osoba)', 'Capacity (Night / Post-Impact)': 'Kapacitet (Noć / nakon utjecaja)',
'Capacity (Night only)': 'Kapacitet (samo noć)', 'Capacity (Night)': 'Kapacitet (noć)', 'Capacity (W x D X H)': 'Kapacitet (Š x D x V)', 'Capacity Building': 'Izgradnja kapaciteta', 'Cape Verde': 'Zelenortska Ostrva', 'Capture Contact Information': 'Dohvati informacije o kontaktu', 'Capture Information on Disaster Victim groups (Tourists, Passengers, Families, etc.)': 'Snimi informacije o grupama žrtava nesreće (turisti, putnici, porodice, itd.)', 'Capture Information on each disaster victim': 'Unesi informacije o svakoj žrtvi katastrofe', 'Capturing the projects each organization is providing and where': 'Bilježenje projekata koje svaka organizacija omogućava i gdje', 'Card holder': 'Vlasnik kartice', 'Cardiology': 'Kardiologija', 'Cargo Pier Depth': 'Dubina mola za teret', 'Case added': 'Dodan slučaj', 'Case deleted': 'Obrisan slučaj', 'Case Details': 'Detalji slučaja', 'Case Number': 'Broj slučaja', 'Case updated': 'Ažuriran slučaj', 'Cases': 'Slučajevi', 'Cash available to restart business': 'Gotovina dostupna za ponovni početak posla', 'Cassava': 'Tropska biljka manioka', 'Casual Labor': 'Obični rad', 'Casualties': 'Gubici', 'Catalog': 'Katalog', 'Catalog added': 'Katalog dodan', 'Catalog deleted': 'Katalog obrisan', 'Catalog Details': 'Detalji o katalogu', 'Catalog Item': 'Stavka kataloga', 'Catalog Item added': 'Dodata stavka u katalog', 'Catalog Item deleted': 'Obrisana stavka iz kataloga', 'Catalog Item updated': 'Ažurirana stavka u katalogu', 'Catalog Items': 'Stavke kataloga', 'Catalog Name': 'Ime kataloga', 'Catalog updated': 'Katalog ažuriran', 'Catalogs': 'Katalozi', 'Categories': 'Kategorije', 'Category': 'Kategorija', 'Category:': 'Kategorija:', 'Category<>Sub-Category<>Catalog Relation added': 'Kategorija<>Podkategorija<>kataloški odnos dodan', 'Category<>Sub-Category<>Catalog Relation updated': 'Kategorija<>Podkategorija<>kataloški odnos ažuriran', 'caucasoid': 'bjelačka', "Caution: doesn't respect the framework rules!": 'Upozorenje: nepoštivanje okvirnih pravila!', 'CBA Women': 'CBA žena', 'CDRT (Community disaster response teams)': 'CDRT (Timovi zajednice za odgovore u slučaju katastrofe)', 'Ceilings, light fixtures': 'Stropovi, rasvjetna tijela', 'Cell Phone': 'Mobilni telefon', 'Cell Tower': 'Ćelijski toranj', 'Central African Republic': 'Centralnoafrička Republika', 'Central point to record details on People': 'Centralna lokacija za bilježenje detalja o ljudima', 'Certificate': 'Certifikat', 'Certificate added': 'Dodat certifikat', 'Certificate Catalog': 'Katalog certifikata', 'Certificate deleted': 'Obrisan certifikat', 'Certificate Details': 'Detalji o certifikatu', 'Certificate Status': 'Status certifikata', 'Certificate updated': 'Ažuriran certifikat', 'Certificates': 'Certifikati', 'Certification': 'Certificiranje', 'Certification added': 'Dodan certifikat', 'Certification deleted': 'Obrisana certifikacija', 'Certification Details': 'Detalji certifikacije', 'Certification updated': 'Ažurirana certifikacija', 'Certifications': 'Certifikati', 'Certifying Organization': 'Organizacija koja daje certifikat', 'Chad': 'Čad', 'Change Password': 'Promijeni lozinku', 'Channel': 'Kanal', 'Chart': 'Grafikon', 'Chat on IRC': 'Ćaskanje na IRC', 'Check': 'Provjera', 'check all': 'označi sve', 'Check all': 'Provjeri sve', 'Check for errors in the URL, maybe the address was mistyped.': 'Pogledajte greške na URL, možda je došlo do greške pri kucanju.', 'Check if the URL is pointing to a directory instead of a webpage.': 'Provjeri da li URL pokazuje na 
direktorij umjesto na stranicu', 'Check outbox for the message status': 'Provjerite izlaznu poštu za status poruke', 'Check Request': 'Provjerite zahtjev', 'Check this to make your search viewable by others.': 'Označite ovo da vaša pretraga bude vidljiva ostavlim', 'Check to delete': 'Označi za brisanje', 'Check to delete:': 'Označi z abrisanje', 'Check-In': 'Ubaci', 'Check-in at Facility': 'Označi na objektu', 'Check-Out': 'Izdvoji', 'Checked': 'Provjereno', 'checked': 'provjereno', 'Checked-In successfully!': 'Uspješno ubačeno', 'Checked-Out successfully!': 'Uspješno izdvojeno', 'Checklist': 'Spisak', 'Checklist created': 'Kontrolni spisak kreiran', 'Checklist deleted': 'Lista zadataka obrisana', 'Checklist Item': 'Zadatak u listi', 'Checklist of Operations': 'Lista operacija', 'Checklist updated': 'Ažurirana lista zadataka', 'Checklists': 'Liste zadataka', 'Chemical Hazard': 'Hemijska opasnost', 'Chemical, Biological, Radiological, Nuclear or High-Yield Explosive threat or attack': 'Hemijske, biološke, radiološke, nuklearne ili visoko-prinosne eksplozivne prijetnje ili napadi', 'Chewing tobacco': 'Duhan za žvakanje', 'Chicken': 'Pilići', 'Child': 'Dijete', 'Child (2-11)': 'Dijete (2-11)', 'Child (< 18 yrs)': 'dijete(<18 godina)', 'Child Abduction Emergency': 'Hitan slučaj otmice djeteta', 'Child headed households (<18 yrs)': 'Dijete na čelu domaćinstva (<18 god)', 'Children (2-5 years)': 'Djeca (2-5 godina)', 'Children (5-15 years)': 'Djeca (5-15 godina)', 'Children (< 2 years)': 'Djeca (mlađa od 2 godine)', 'Children in adult prisons': 'Djeca u zatvorima za odrasle', 'Children in boarding schools': 'Djeca u internatima', 'Children in homes for disabled children': 'Djeca u kućama za djecu s invaliditetom', 'Children in juvenile detention': 'Djeca u pritvoru za maloljetnike', 'Children in orphanages': 'Djeca u sirotištu', 'Children living on their own (without adults)': 'Djeca koja žive sama (bez staratelja)', 'Children not enrolled in new school': 'Djeca koja nisu upisana o novu školu', 'Children orphaned by the disaster': 'Djeca koja su siročad zbog katastrofe', 'Children separated from their parents/caregivers': 'Djeca odvojena od svojih roditelja/staratelja', 'Children that have been sent to safe places': 'Djeca koja su poslana na sigurna mjesta', 'Children who have disappeared since the disaster': 'Djeca nestala nakon katastrofe', 'Children with chronical illnesses': 'Djeca s hroničnim bolestima', "Children's Education": 'Obrazovanje djece', 'Chile': 'Čile', 'Chin, Inclination': 'Brada, nagib', 'Chin, Shape': 'Brada, oblik', 'Chin, Size': 'Brada, veličina', 'China': 'Kina', 'Chinese': 'Kineski', 'Chinese (Simplified)': 'Kineski (pojednostavljen)', 'Chinese (Taiwan)': 'Kineski (Tajvan)', 'Cholera Treatment': 'Tretman kolere', 'Cholera Treatment Capability': 'Sposobnost tretmana kolere', 'Cholera Treatment Center': 'Centar za tretman kolere', 'Cholera-Treatment-Center': 'Centar za liječenje kolere', 'Choose a new posting based on the new evaluation and team judgement. Severe conditions affecting the whole building are grounds for an UNSAFE posting. Localised Severe and overall Moderate conditions may require a RESTRICTED USE. Place INSPECTED placard at main entrance. Post all other placards at every significant entrance.': 'Izaberi novo slanje bazirano na novim vrednovanjima i procjeni tima. Teški uslovi koji utiču na cijelu zgradu su temeljni za NESIGURNO postavljanje. Lokalizirano teški i pretežno umjereni uslovi mogu zahtjevati OGRANIČENO korištenje. 
Stavite UOČLJIV plakat na glavni ulaz. Postavite sve ostale plakate na sve značajnije ulaze.', 'Choose Country': 'Izaberite državu', 'Choose Manually': 'Izaberi ručno', 'Choosing Skill and Resources of Volunteers': 'Izbor vještina i resursa volontera', 'Christian': 'Kršćanin', 'Church': 'Crkva', 'Cigarettes': 'Cigarete', 'Cigars': 'Cigare', 'circular': 'kružno', 'Circumstances of disappearance, other victims/witnesses who last saw the missing person alive.': 'Okolnosi nestanka, druge žrtve/svjedoci koji su zadnji vidjeli živu osobu.', 'City': 'grad', 'City / Town / Village': 'Općina/Mjesto', 'Civil Emergency': 'Civilno izvanredno stanje', 'Cladding, glazing': 'Oblaganje, glačanje', 'Clean Instance': 'Čista kopija', 'clear': 'čisto', 'Clear All': 'Obriši sve', 'Clear all Layers': 'Očisti sve slojeve', 'Clear CACHE?': 'Obrisati predmemoriju?', 'Clear DISK': 'Obriši na disku', 'Clear filter': 'Očisti filter', 'Clear RAM': 'Obriši u memoriji', 'Clear Selection': 'Obriši izbor', 'Cleft chin': 'Rupica na bradi', "Click 'Start' to synchronize with this repository now:": "Kliknite 'Start' za sinhronizaciju s ovim repozitorijem sada:", 'click for more details': 'pritisni za više detalja', 'click here': 'kliknite ovdje', 'Click on a marker to see the Completed Assessment Form': 'Kliknite na marker da vidite formular za završenu ocjenu', "Click on questions below to select them, then click 'Display Selected Questions' button to view the selected questions for all Completed Assessment Forms": "Kliknite na pitanja ispod da ih odaberete, zatim kliknite na 'Prikaži izabrana pitanja' dugme da vidite izabrana pitanja za sve završene formulare procjene.", 'Click on the chart to show/hide the form.': 'Kliknite na dijagram za prikaz/sakrivanje formulara', 'Click on the link': 'Kliknite na link', 'Click on the slider to choose a value': 'Kliknite na klizač za izbor vrijednosti', 'Click to edit': 'Kliknite da uredite', 'Click where you want to open Streetview': 'Kliknite gdje želite otvoriti Streetview', 'Client ID': 'Identifikacija korisnika', 'Client IP': 'IP klijenta', 'Client Secret': 'Tajni ključ korisnika ', 'Climate': 'Klima', 'Climate change mitigation': 'Ograničenja izmjena klime', 'Climate change preparednes': 'Pripreme na izmjene klime', 'Clinical Laboratory': 'Klinički laboratorij', 'Clinical Operations': 'Kliničke operacije', 'Clinical Status': 'Klinički status', 'Close': 'Zatvori', 'Close map': 'Zatvori mapu', 'Closed': 'Zatvoreno', 'CLOSED': 'ZATVORENO', 'Closed?': 'Zatvoreno?', 'Closure': 'Zatvaranje', 'Clothing': 'Odjeća', 'Cluster': 'Skup', 'Cluster added': 'Skup dodan', 'Cluster Attribute': 'Atribut skupa', 'Cluster deleted': 'Skup obrisan', 'Cluster Details': 'Detalji skupa', 'Cluster Distance': 'Udaljenost skupova', 'Cluster Subsector': 'Podsektor skupa', 'Cluster Subsector added': 'Podsektor skupa dodan', 'Cluster Subsector deleted': 'Podsektor skupa obrisan', 'Cluster Subsector Details': 'Detalji podsektora skupa', 'Cluster Subsector updated': 'Podsektor skupa ažuriran', 'Cluster Subsectors': 'Podsektori skupa', 'Cluster Threshold': 'Prag skupa', 'Cluster updated': 'Skup ažuriran', 'Cluster(s)': 'Skup(ovi)', 'Clusters': 'Skupovi', 'CN': 'CN', 'Coalition added': 'Koalicija dodana', 'Coalition Details': 'Detalji koalicije', 'Coalition removed': 'Uklonjena koalicija', 'Coalition updated': 'Koalicija ažurirana', 'Coalitions': 'Koalicije', 'Code': 'Šifra', 'Code Share': 'Dijeljenja koda', 'Code:': 'Kôd:', 'Cold Wave': 'Hladni talas', 'Collapse, partial collapse, off foundation': 'Kolaps, 
djelimični kolaps, pomjereni temelji', 'collateral event': 'kolateralni događaj', 'Collective center': 'Kolektivni centar', 'Colombia': 'Kolumbija', 'Colour for Underline of Subheadings': 'Boja za podvlačenje podnaslova', 'Colour of bottom of Buttons when not pressed': 'Boja dna dugmadi kada nisu pritisnuta', 'Colour of bottom of Buttons when pressed': 'Boja dna tastera kada je pritisnut', 'Colour of Buttons when hovering': 'Boja dugmadi kada se prelijeću', 'Colour of dropdown menus': 'Boja padajućih menija', 'Colour of selected Input fields': 'Boja selektovanih polja za unos', 'Colour of selected menu items': 'Boja označenih stavki menija', 'Column Choices (One Per Line': 'Izbor kolona (Jedan po liniji)', 'Columns, pilasters, corbels': 'Stubovi, pilastri, korbali', 'Combined Method': 'Kombinovana metoda', 'Come back later.': 'Vratite se poslije.', 'Come back later. Everyone visiting this site is probably experiencing the same problem as you.': 'Pokušajte kasnije. Svi koji posjećuju ovaj sajt vjerojatno imaju isti problem kao i vi.', 'Command Tactical Operational Vehicle': 'Komandno taktičko radno vozilo', 'Comment': 'Komentar', 'Comments': 'Komentari', 'Comments permitted?': 'Komentarisanje dozvoljeno?', 'Commercial/Offices': 'Poslovni/Uredi', 'Commit': 'Izvrši', 'Commit All': 'Potvrdi sve', 'Commit Date': 'Datum izvršenja', 'Commit from %s': 'Izvrši od %s', 'Commit Status': 'Status zaduženja', 'Commiting a changed spreadsheet to the database': 'Predavanje izmijenjenog tabelarnog prikaza bazi podataka', 'Commitment': 'Zaduženje', 'Commitment Added': 'Zaduženje dodano', 'Commitment Canceled': 'Zaduženje otkazano', 'Commitment Details': 'Detalji o zaduženjima', 'Commitment Item': 'Stavka angažovanja', 'Commitment Item added': 'Stavka zaduženja dodana', 'Commitment Item deleted': 'Stavka zaduženja obrisana', 'Commitment Item Details': 'Detalji o zaduženju', 'Commitment Item updated': 'Stavka zaduženja ažurirana', 'Commitment Items': 'Stavke zaduženja', 'Commitment Status': 'Status zaduženja', 'Commitment Updated': 'Zaduženje ažurirano', 'Commitments': 'Zaduženja', 'Commitments can be made against these Requests however the requests remain open until the requestor confirms that the request is complete.': 'Zaduženja mogu biti napravljena prema ovim zahtjevima, ali ona ostaju otvorena dok zahtjevaoc ne potvrdi da je zahtjev kompletan.', 'Committed': 'Zaduženo', 'Committed By': 'Zaduženo od strane', 'Committed Items': 'Zadužene stavke', 'Committed People': 'Zaduženo osoblje', 'Committed People Details': 'Detalji o zaduženoj osobi', 'Committed People updated': 'Zadužene osobe ažurirane', 'Committed Person Details': 'Detalji o zaduženoj osobi', 'Committed Person updated': 'Zadužena osoba ažurirana', 'Committing Inventory': 'Predavanje inventara', 'Committing Organization': 'Izvršna organizacija', 'Committing Person': 'Izvršna osoba', 'Committing Warehouse': 'Zadužena skladišta', 'Commodities Loaded': 'Roba natovarena', 'Communication problems': 'Komunikacijski problemi', 'Communities': 'Zajednice', 'Community': 'Zajednica', 'Community Added': 'Zajednica dodana', 'Community Based Health and First Aid (CBHFA)': 'Zdravstvo i prva pomoć koju organizuje društvena zajednica (CBHFA)', 'Community Centre': 'Mjesna zajednica', 'Community Contacts': 'Kontakt podaci zajednice', 'Community Deleted': 'Zajednica obrisana', 'Community Details': 'Detalji zajednice', 'Community Health Center': 'Dom zdravlja', 'Community Member': 'Član zajednice', 'Community organisation': 'Organizacija zajednice', 'Community 
Updated': 'Zajednica ažurirana', 'Comoros': 'Komori', 'Company': 'Preduzeće', 'Competencies': 'sposobnosti', 'Competency': 'Sposobnost', 'Competency added': 'Stručnosti dodane', 'Competency deleted': 'Stručnost obrisana', 'Competency Details': 'Detalji o sposobnostima', 'Competency Rating': 'Nivo spremnosti', 'Competency Rating added': 'Ocjena stručnosti dodana', 'Competency Rating Catalog': 'Katalog ocjena stručnosti', 'Competency Rating deleted': 'Ocjena stručnosti obrisana', 'Competency Rating Details': 'Detalji statusa spremnosti', 'Competency Rating updated': 'Ocjena stručnosti ažurirana', 'Competency Ratings': 'Ocjene sposobnosti', 'Competency updated': 'Stručnost je ažurirana', 'Complete': 'Završeno', 'Complete Adjustment': 'Završi podešavanje', 'Complete Database Synchronized': 'Kompletna baza podataka sinhronizovana', 'Complete Returns': 'Završena vraćanja', 'Complete Unit Label for e.g. meter for m.': 'Puno ime jedinice, npr metar za m.', 'Complete? Please call': 'Završeno? Molim pozovite', 'Completed': 'Završeno', 'completed': 'završeno', 'Completed Assessment Form deleted': 'Formular završene procjene obrisan', 'Completed Assessment Form Details': 'Detalji završenog formulara ocjene', 'Completed Assessment Form entered': 'Unesen završen formular ocjene', 'Completed Assessment Form updated': 'Brza procjena ažurirana', 'Completed Assessment Forms': 'Završeni formulari ocjene', 'Completed Assessments': 'Završene ocjene', 'Completed tour?': 'Završena tura', 'Completion Question': 'Pitanje završavanja', 'Complexion': 'Ten', 'Compose': 'Sastavi', 'Compromised': 'Kompromitirano', 'concave': 'konkavn', 'Concrete frame': 'Betonski okvir', 'Concrete shear wall': 'Betonsko smicanje zida', 'Condition': 'Stanje', 'Conduct a Disaster Assessment': 'Obavi procjenu katastrofe', 'Config': 'Konfiguracija', 'Config added': 'Konfiguracija dodana', 'Config deleted': 'Konfiguracija izbrisana', 'Config Details': 'Detalji konfiguracije', 'Config not found!': 'Konfiguracija nije nađena!', 'Config updated': 'KOnfiguracija ažurirana', 'Configs': 'Konfiguracije', 'Configuration': 'Konfiguracija', 'Configurations': 'Konfiguracije', 'Configure connection details and authentication': 'Konfigurišite detalje o povezivanju i autentifikaciju', 'Configure Layer for this Symbology': 'Konfigurišite sloj za ovo značenje simbola', 'Configure resources to synchronize, update methods and policies': 'Konfigurišite resurse za sinhronizaciju, metode ažuriranja i politike', 'Configure Run-time Settings': 'Konfiguriši izvršne postavke', 'Configure the default proxy server to connect to remote repositories': 'Konfigurišite podrazumijevani proxy server za vezu s udaljenim repozitorijima', 'Configure/Monitor Synchronization': 'Konfiguriši/prati sinhronizaciju', 'Confirm Shipment Received': 'Potvrdite primljenu pošiljku', 'Confirm that some items were returned from a delivery to beneficiaries and they will be accepted back into stock.': 'Potvrdite da su neki artikli vraćeni od korisnika i da će biti prihvaćeni nazad u skladište.', 'Confirm that the shipment has been received by a destination which will not record the shipment directly into the system and confirmed as received.': 'Potvrdite da je dostava stigla na odredište koje neće bilježiti dostavu direktno u sistem i da je potvrđeno kao primljeno.', 'Confirmed': 'Potvrđeno', 'confirmed': 'potvrđeno', 'Confirmed Incidents': 'Potvrđen incident', 'Confirming Organization': 'Organizacija koja potvrđuje', 'Conflict Details': 'Detalji sukoba', 'Conflict Policy': 'Politika 
konflikta', 'Conflict Resolution': 'Razrješenje konflikta', 'Congo, Democratic Republic of the (Congo-Kinshasa)': 'Kongo, Demokratska Republika (Zair)', 'Congo, Republic of the (Congo-Brazzaville)': 'Kongo, Republika (Brazzaville)', 'Connect Parser': 'Parser konekcija', 'consider': 'razmotri', 'Consignment Note': 'Sprovodni list', 'Consignment Number, Tracking Number, etc': 'Konsignacijski broj, praćeni broj itd.', 'constraint_id': 'ogranicenje_id', 'Constraints Only': 'Samo ograničenja', 'Consumable': 'Potrošni', 'Contact': 'Kontakt osoba', 'Contact Added': 'Informacije o kontaktu su dodane', 'Contact added': 'Informacije o kontaktu su dodane', 'Contact Data': 'Kontakt podaci', 'Contact deleted': 'Kontakt obrisan', 'Contact Deleted': 'Izbrisan kontakt', 'Contact details': 'Detalji o kontaktu', 'Contact Details': 'Detalji o kontaktu', 'Contact Details updated': 'Informacije o kontaktu su ažurirane', 'Contact Info': 'Kontakt podaci', 'Contact Information': 'Kontakt informacije', 'Contact Information Added': 'Informacije o kontaktu su unesene', 'Contact information added': 'Dodata kontakt informacija', 'Contact Information Deleted': 'Izbrisane informacije o kontaktu', 'Contact information deleted': 'Obrisana kontakt informacija', 'Contact Information Updated': 'Informacije o kontaktu ažurirane', 'Contact information updated': 'Ažurirana kontakt informacija', 'Contact Method': 'Način kontakta', 'Contact Name': 'Ime kontakt osobe', 'Contact People': 'Kontakt osobe', 'Contact Person': 'Kontakt osoba', 'Contact Phone': 'Kontakt telefon', 'Contact Updated': 'Ažurirane kontakt informacije', 'Contact Us': 'Kontaktirajte nas', 'Contact us': 'Kontaktirajte nas', 'Contacts': 'Kontakti', 'Contacts:': 'Kontakti:', 'Content': 'Sadržaj', 'Content Management': 'Upravljanje sadržajem', 'Content Management System': 'Sistem za upravljanje sadržajem', 'Contents': 'Sadržaj', 'Context': 'Kontekst', 'Contingency planning': 'Planiranje za vanredne slučajeve', 'Contract End Date': 'Krajnji datum ugovora', 'Contradictory values!': 'Protivriječne vrednosti!', 'Contributor': 'Saradnik', 'Controller': 'Kontroler', 'Controller name': 'Ime kontrolera', 'Controller tour is activated': 'Tura kontrolera je aktivirana', 'Conversion Tool': 'Sredstvo konverzije', 'convex': 'konveksni', 'Cook Islands': 'Kukova Ostrva', 'Cooking NFIs': 'Neprehrambeni artikli za kuhanje', 'Cooking Oil': 'Jestivo ulje', 'Coordinate Conversion': 'Pretvaranje Koordinata', 'Coordinate Layer': 'Sloj koordinata', 'Coping Activities': 'Aktivnosti suočavanja', 'Copy': 'Kopiraj', 'Corn': 'Kukuruz', 'Corporate Entity': 'Poslovna jedinica', 'Cost per Megabyte': 'Cijena po megabajtu', 'Cost per Minute': 'Trošak po minutu', 'Cost Type': 'Vrsta troška', 'Costa Rica': 'Kostarika', 'Could not add person record': 'Ne mogu dodati zapis o osobi', 'Could not auto-register at the repository, please register manually.': 'Ne mogu automatski registrovati na repozitoriju, molim registrujte ručno.', 'Could not create record.': 'Ne može se kreirati zapis', 'Could not initiate manual synchronization.': 'Ne mogu pokrenuti ručnu sinhronizaciju', 'Could not merge records. (Internal Error: %s)': 'Ne mogu spojiti slogove. 
(Interna greška: %s)', "couldn't be parsed so NetworkLinks not followed.": 'nije mogao biti analiziran pa se Mrežni linkovi ne prate', "Couldn't open %s!": 'Ne mogu otvoriti %s!', 'Counselling': 'Savjet', 'Count': 'Broj', 'Count of Question': 'Broj pitanja', 'Country': 'Država', 'Country Code': 'Kôd države', 'Country is required!': 'Zahtijevana država', 'Country of Residence': 'Država prebivališta', 'County': 'Pokrajina', 'County / District': 'Kanton / Regija', 'Course': 'Kurs', 'Course added': 'Dodan kurs', 'Course Catalog': 'Katalog kurseva', 'Course Certicate added': 'Dodat certifikat kursa', 'Course Certicate deleted': 'Certifikat kursa izbrisan', 'Course Certicate Details': 'Detalji certifikata kursa', 'Course Certicate updated': 'Potvrda o kursu ažurirana', 'Course Certicates': 'Certifikati kurseva', 'Course Certificate added': 'Dodat certifikat kursa', 'Course Certificate deleted': 'Obrisan certifikat kursa', 'Course Certificate Details': 'Detalji certifikata kursa', 'Course Certificate updated': 'Ažuriran certifikat kursa', 'Course Certificates': 'Certifikati kursa', 'Course deleted': 'Obrisan kurs', 'Course Details': 'Detalji kursa', 'Course updated': 'Ažuriran kurs', 'Courses': 'Kursevi', 'covered': 'prekriveno', 'Create': 'Kreiraj', 'Create & manage Distribution groups to receive Alerts': 'Kreiraj & upravljaj grupama distibucije za primanje znakova za uzbunu', "Create 'More Info'": 'Kreiraj dodatne podatke', 'Create a group entry in the registry.': 'Kreiraj unosenje grupe u registar.', 'Create a new facility or ensure that you have permissions for an existing facility.': 'Kreirajte novi objekat ili osigurajte da imate potrebna prava nad postojećim objektom.', 'Create a new Group.': 'Kreiraj novu grupu.', 'Create a new organization or ensure that you have permissions for an existing organization.': 'Kreirajte novu organizaciju ili osigurajte da imate potrebna prava nad postojećom organizacijom.', 'Create a new Team.': 'Kreiraj novi Tim', 'Create a Person': 'Kreiraj osobu', 'Create Activity': 'Kreiraj aktivnost', 'Create Activity Report': 'Kreiraj izvještaja o aktivnostima', 'Create Activity Type': 'Kreiraj tip aktivnosti', 'Create Airport': 'Kreiraj aerodrom', 'Create Alternative Item': 'Kreiraj alternativnu stavku', 'Create an Assessment Question': 'Kreiraj pitanje ocjene', 'Create Assessment Answer': 'Kreiraj odgovor ocjene', 'Create Assessment Template': 'Kreiraj predložak ocjene', 'Create Assessment': 'Kreiraj ocjene', 'Create Asset': 'Kreiraj sredstvo', 'Create Award': 'Kreiraj nagradu', 'Create Base Station': 'Kreiraj baznu stanicu', 'Create Bed Type': 'Kreiraj vrstu ležaja', 'Create Beneficiary Type': 'Kreiraj tip korisnika', 'Create Brand': 'Kreiraj proizvođačku marku', 'Create Campaign': 'Kreiraj kampanju', 'Create Case': 'Kreiraj slučaj', 'Create Catalog': 'Kreiraj katalog', 'Create Catalog Item': 'Kreiraj stavku kataloga', 'Create Certificate': 'Kreiraj certifikat', 'Create Checklist': 'Kreiraj listu zadataka', 'Create Cholera Treatment Capability Information': 'Kreiraj informacije o sposobnosti liječenja kolere', 'Create Cluster': 'Kreiraj grupisanje', 'Create Coalition': 'Kreiraj koaliciju', 'Create Community': 'Kreiraj Zajednicu', 'Create Competency Rating': 'Kreiraj status spremnosti', 'Create Contact': 'Kreiraj kontakt', 'Create Course': 'Kreiraj kurs', 'Create Dead Body Report': 'Kreiraj izvještaj o mrtvim tijelima', 'Create Department': 'Kreiraj odjeljenje', 'Create Details': 'Kreiraj detalje', 'Create Event': 'Kreiraj događaj', 'Create Event Type': 'Kreiraj tip 
događaja', 'Create Facility': 'Kreiraj objekat', 'Create Facility Type': 'Kreiraj vrstu objekta', 'Create Feature Layer': 'Kreiraj sloj karakteristika', 'Create GPS data': 'Kreiraj GPS podatke', 'Create Group': 'Kreiraj grupu', 'Create Group Entry': 'Kreiraj element grupe', 'Create Hazard': 'Kreiraj rizik', 'Create Heliport': 'Kreiraj heliodrom', 'Create Hospital': 'Kreiraj bolnicu', 'Create Identification Report': 'Kreiraj izvještaj o identifikacijama', 'Create Impact Assessment': 'Kreiraj procjenu utjecaja', 'Create Incident': 'Kreiraj incident', 'Create Incident Report': 'Kreiraj izvještaj o incidentu', 'Create Incident Type': 'Kreiraj tip incidenta', 'Create Item': 'Kreiraj stavku', 'Create Item Category': 'Kreiraj kategoriju stavke', 'Create Item Pack': 'Kreiraj paket stavki', 'Create Job': 'Kreiraj posao', 'Create Job Title': 'Kreiraj radno mjesto', 'Create Kit': 'Kreiraj komplet', 'Create Layer': 'Kreiraj sloj', 'Create Location': 'Kreiraj lokaciju', 'Create Location Hierarchy': 'Kreiraj hijerarhiju lokacija', 'Create Mailing List': 'Kreiraj listu za slanje poruka', 'Create Map Profile': 'Kreiraj konfiguraciju mape', 'Create Marker': 'Kreiraj marker', 'Create Member': 'Kreiraj člana', 'Create Milestone': 'Kreiraj prekretnicu', 'Create Mobile Impact Assessment': 'Kreiraj mobilnu procjenu utjecaja', 'Create Morgue': 'Kreiraj mrtvačnicu', 'Create Network': 'Kreiraj mrežu', 'Create New Asset': 'Kreiraj novo sredstvo', 'Create New Catalog Item': 'Kreiraj novu stavku kataloga', 'Create New Event': 'Napravi novi događaj', 'Create New Item Category': 'Kreiraj novu kategoriju stavke', 'Create new Office': 'Kreiraj novi ured', 'Create new Organization': 'Napravi novu organizaciju', 'Create New Request': 'Kreiraj novi zahtjev', 'Create New Scenario': 'Kreiranje novog scenarija', 'Create New Vehicle': 'Kreiraj novo vozilo', 'Create Office': 'Kreiraj kancelariju', 'Create Office Type': 'Kreiraj tip kancelarije', 'Create Organization': 'Kreiraj organizaciju', 'Create Organization Type': 'Kreiraj tip organizacije', 'Create Partner Organization': 'Kreiraj partnersku organizaciju', 'Create Personal Effects': 'Kreiraj lične uticaja', 'Create PoI Type': 'Kreiraj tačku interesa', 'Create Point of Interest': 'Kreiraj tačku interesa', 'Create Policy or Strategy': 'Kreiraj politiku ili strategiju', 'Create Post': 'Kreiraj blok ugradivog teksta', 'Create Program': 'Kreiraj program', 'Create Project': 'Kreiraj projekat', 'Create Projection': 'Kreiraj projekciju', 'Create Question Meta-Data': 'Kreiraj metapodatke pitanja', 'Create Rapid Assessment': 'Napravi brzu procjenu', 'Create Report': 'Kreiraj izvještaj', 'Create Repository': 'Kreiraj repozitorij', 'Create Request': 'Kreiraj zahtjev', 'Create Request Template': 'Kreiraj predložak zahtjeva', 'Create Resource': 'Kreiraj resurs', 'Create Resource Type': 'Kreiraj tip resursa', 'Create River': 'Kreiraj rijeku', 'Create Role': 'Kreiraj ulogu', 'Create Room': 'Kreiraj sobu', 'Create Seaport': 'Kreiraj luku', 'Create search': 'Kreiraj pretragu', 'Create Sector': 'Kreiraj sektor', 'Create Series': 'Kreiraj seriju', 'Create Service': 'Kreiraj uslugu', 'Create Service Profile': 'Kreiraj profil usluge', 'Create Shelter': 'Kreiraj sklonište', 'Create Shelter Service': 'Kreiraj uslugu skloništa', 'Create Shelter Status': 'Kreiraj status skloništa', 'Create Shelter Type': 'Kreiraj tip skloništa', 'Create Skill': 'Kreiraj vještinu', 'Create Skill Type': 'Kreiraj tip vještine', 'Create Staff Member': 'Kreiraj člana osoblja', 'Create Status': 'Kreiraj status', 'Create 
Status Report': 'Kreiraj statusni izvještaj', 'Create Supplier': 'Kreiraj dobavljača', 'Create Symbology': 'Kreiraj značenje simbola', 'Create Tag': 'Kreiraj oznaku', 'Create Task': 'Kreiraj zadatak', 'Create Team': 'Kreiraj tim', 'Create Template Section': 'Kreiraj odjeljak predloška', 'Create Theme': 'Kreiraj temu', 'Create Tour': 'Kreiraj turu', 'Create Training Event': 'Kreiraj događaj obuke', 'Create User': 'Kreiraj korisnika', 'Create Vehicle': 'Kreiraj vozilo', 'Create Vehicle Detail': 'Kreiraj detalje o vozilu', 'Create Volunteer': 'Kreiraj volontera', 'Create Volunteer Cluster': 'Kreiraj skup volontera', 'Create Volunteer Cluster Position': 'Kreiraj poziciju skupa volontera', 'Create Volunteer Cluster Type': 'Kreiraj tip skup volontera', 'Create Volunteer Role': 'Kreiraj ulogu volontera', 'Create Warehouse': 'Kreiraj skladište', 'Create, enter, and manage surveys.': 'Kreiraj, pristupi i upravljaj anketama.', 'created': 'kreirano', 'Created By': 'Kreirao', 'Created on %s': 'Kreirano %s', 'Created on %s by %s': 'Kreirano dana %s od strane %s', 'Creation of assessments': 'Kreiranje procjena', 'Creation of Surveys': 'Kreiranje anketa', 'Credential': 'Akreditiv', 'Credential added': 'Akreditiv dodan', 'Credential deleted': 'Akreditiv obrisan', 'Credential Details': 'Detalji o akreditivima', 'Credential updated': 'Akreditiv ažuriran', 'Credentialling Organization': 'Akreditirajuća organizacija', 'Credentials': 'Akreditivi', 'Credit Card': 'Kreditna kartica', 'Crime': 'Zločin', 'criminal intent': 'Namjera zločina', 'Criteria': 'Kriteriji', 'critical': 'kritično', 'Croatia': 'Hrvatska', 'Crop Image': 'Sasijeci sliku', 'cross-eyed': 'razrok', 'CSS file %s not writable - unable to apply theme!': 'CSS datoteka %s nemoguća za pisati - nije moguće promijeniti temu!', 'CSV file required': 'CSV datoteka je potrebna', 'Cuba': 'Kuba', 'curly': 'kovrčavo', 'Currency': 'Valuta', 'current': 'tekuće', 'Current': 'Tekući', 'Current community priorities': 'Trenutni prioriteti zajednice', 'Current Entries': 'Trenutni elementi', 'Current general needs': 'Trenutne generalne potrebe', 'Current greatest needs of vulnerable groups': 'Trenutno najveće potrebe pogođenih grupa', 'Current Group Members': 'Trenutni članovi grupe', 'Current Group Memberships': 'Trenutni članovi grupe', 'Current health problems': 'Trenutni zdravstveni problemi', 'Current Home Address': 'Trenutna kućna adresa', 'Current Identities': 'Trenutni identiteti', 'Current Location': 'Trenutna lokacija', 'Current Location Country': 'Zemlja trenutne lokacije', 'Current Location Phone Number': 'Broj telefona na trenutnoj lokaciji', 'Current Location Treating Hospital': 'Bolnica za tretman na trenutnoj lokaciji', 'Current Log Entries': 'Trenutne stavke zapisnika', 'Current main income sources': 'Trenutni glavni izvori prihoda', 'Current major expenses': 'Trrenutni veći troškovi', 'Current Memberships': 'Trenutno članstvo', 'Current Mileage': 'Trenutna kilometraža', 'Current Notes': 'Trenutne napomene', 'Current number of patients': 'Trenutni broj pacijenata', 'Current Owned By (Organization/Branch)': 'Trenutni vlasnik (organizacija/grana)', 'Current problems, categories': 'Trenutni problemi, kategorije', 'Current problems, details': 'Tekući problemi, pojedinosti', 'Current Records': 'Trenutni zapisi', 'Current Registrations': 'Trenutna Registracija', 'Current request': 'Trenutni zahtjev', 'Current response': 'trenutni odziv', 'Current session': 'Trenutna sesija', 'Current staffing level at the facility.': 'Trenutni nivo osoblja na objektu.', 
'Current Status': 'Trenutni status', 'Current Team Members': 'Trenutni članovi tima', 'Current Twitter account': 'Trenutni twitter profil.', 'Current type of health problems, adults': 'Trenutna vrsta zdravstvenih problema odraslih', 'Current type of health problems, children': 'Drugi tip zdravstvenih problema, djeca', 'Current type of source for drinking water': 'Trenutni tip izvora pitke vode', 'Current type of source for sanitary water': 'Trenutni tip izvora sanitarne vode', 'Currently no Appraisals entered': 'Nema trenutno unesenih procjena ponuda', 'Currently no Certifications registered': 'Nema trenutno registrovanih potvrda', 'Currently no Competencies registered': 'Trenutno nema registrovanih kompetencija', 'Currently no Course Certicates registered': 'Trenutno nisu registrovani certifikati kurseva', 'Currently no Course Certificates registered': 'Trenutno nisu registrovani certifikati kursa', 'Currently no Credentials registered': 'Trenutno nema registriranih akreditiva', 'Currently no entries in the catalog': 'Trenutno nema unosa u katalog', 'Currently no hours recorded for this volunteer': 'Trenutno nema zabilježenih sati za ovog volontera', 'Currently no Missions registered': 'Trenutno nema registrovanih misija', 'Currently no Participants registered': 'Trenutno nema registrovanih učesnika', 'Currently no Professional Experience entered': 'Trenutno nije uneseno profesionalno iskustvo', 'Currently no programs registered': 'Trenutno nema registrovanih programa', 'Currently no Skill Equivalences registered': 'Trenutno nije zabilježena ekvivalencija vještina', 'Currently no Skills registered': 'Trenutno nema registriranih vještina', 'Currently no staff assigned': 'Trenutno nema dodijeljenog osoblja', 'Currently no training events registered': 'Trenutno nema događaja obuke registrovanih', 'Currently no Trainings registered': 'Trenutno nema registrovanih treninga', 'currently registered': 'Trenutno registrirani', 'Currently your system has default username and password. Username and Password are required by foriegn machines to sync data with your computer. You may set a username and password so that only those machines can fetch and submit data to your machines which your grant access by sharing your password.': 'Trenutno vaš sistem ima podrazumijevano korisničko ime i lozinku. Korisničko ime i lozinka su potrebni foriegn mašine za sinhronizaciju podataka s računalom. 
Možete postaviti korisničko ime i lozinku, tako da samo one mašine mogu dohvatiti i dostaviti podatke vaše mašine kojima ste dodijelili pristup dijeleći svoju lozinku.', 'Customs Capacity': 'Kapacitet carine', 'Customs Warehousing Storage Capacity': 'Kapacitet carinskog skladišta', 'Cyprus': 'Kipar', 'Czech Republic': 'Češka Republika', "Côte d'Ivoire": 'Obala Slonovače', 'Daily': 'Dnevno', 'daily': 'dnevno', 'Daily Work': 'Dnevni rad', 'Dam Overflow': 'Preliv Brane', 'Damage': 'Šteta', 'Damage Assessment': 'Procjena štete', 'Damage sustained': 'Pretrpljena šteta', 'Damaged': 'Oštećeno', 'Dangerous Person': 'Opasna osoba', 'dark': 'tamno', 'Dashboard': 'Kontrolna ploča', 'Data': 'Podatak', 'Data added to Theme Layer': 'Podaci dodani na tematski sloj', 'Data import policy': 'Politika uvoza podataka', 'Data not available': 'Podaci nisu dostupni', 'Data Type': 'Tip podataka', 'data uploaded': 'podaci poslani', 'Data uploaded': 'Podaci preneseni', 'database': 'baza podataka', 'Database': 'Baza podataka', 'database %s select': 'baza podataka %s selektovana', 'Database %s select': 'baza podataka %s selektovana', 'DataTable ID': 'ID tabele podataka', 'DataTable row': 'Red tabele podataka', 'Date': 'Datum', 'Date & Time': 'Datum i vrijeme', 'Date and Time': 'Datum i vrijeme', 'Date and Time of Goods receipt. By default shows the current time but can be modified by editing in the drop down list.': 'Datum i vrijeme prijema robe. Normalno je ovdje prikazano trenutno vrijeme, ali se može izmijeniti u padajućoj listi.', 'Date and time this report relates to.': 'datum i vrijeme koje se odnose na ovaj izvještaj', 'Date Avaialble': 'Datum dostupan', 'Date Available': 'Datum dostupnosti', 'Date Created': 'Datum kreiranja', 'Date Due': 'Krajnji rok', 'Date Expected': 'Očekivan datum plaćanja', 'Date Modified': 'Datum izmjene', 'Date must be %(max)s or earlier!': 'Datum mora biti %(max)s ili raniji!', 'Date must be %(min)s or later!': 'Datum mora biti %(min)s ili kasniji!', 'Date must be between %(min)s and %(max)s!': 'Datum mora biti između %(min)s i %(max)s!', 'Date Needed By': 'Datum kada je potrebno', 'Date of Birth': 'Datum rođenja', 'Date of Latest Information on Beneficiaries Reached': 'Datum najnovijih informacija o korisnicima dostignut.', 'Date of Recovery': 'Datum pronalaska', 'Date of Report': 'Datum podnošenja izvještaja', 'Date of Treatment': 'Datum tretmana', 'Date Printed': 'Datum štampe', 'Date Published': 'Datum objavljivanja', 'Date Question': 'Datum pitanja', 'Date Received': 'Datum prijema', 'Date Released': 'Datum izlaza', 'Date Repacked': 'Datum ponovnog pakovanja', 'Date Requested': 'Traženi datum', 'Date Required': 'Neophodan datum', 'Date Required Until': 'Datum potreban do', 'Date Sent': 'Datum slanja', 'Date Taken': 'Datum preuzimanja', 'Date Until': 'Datum do', 'Date/Time': 'Datum/Vrijeme', 'Date/Time of Alert': 'Vrijeme i datum uzbune', 'Date/Time of Dispatch': 'Vrijeme i datum raspodjele', 'Date/Time of Find': 'Datum/Vrijeme pretrage', 'Date/Time when found': 'Datum/Vrijeme kada je pronađeno', 'Date/Time when last seen': 'Dan/Vrijeme posljednjeg viđenja', 'Day': 'Dan', 'db': 'baza podataka', 'DC': 'DC', 'De-duplicate': 'Ukloni duplikat', 'De-duplicate Records': 'Ukloni duple slogove', 'De-duplicator': 'De-duplicator(Ukloni duple)', 'Dead Bodies': 'Mrtva Tijela', 'Dead Body': 'Leš', 'Dead Body Details': 'Detalji o mrtvim tijelima', 'Dead body report added': 'Dodat izvještaj o mrtvom tijelu', 'Dead body report deleted': 'Obrisan izvještaj o mrtvom tijelu', 'Dead body report 
updated': 'Ažuriran izvještaj o mrtvom tijelu', 'Dead Body Reports': 'Izvještaj o mrtvim tijelima', 'Deaths in the past 24h': 'Broj smrtnih slučajeva u protekla 24 sata', 'Deaths/24hrs': 'Smrtnost/24h', 'Debug': 'Praćenje grešaka', 'deceased': 'preminuo', 'Deceased': 'Preminuo', 'Decimal Degrees': 'Decimalni stepeni', 'DECISION': 'ODLUKA', 'Decision': 'Odluka', 'Decomposed': 'Raspadnuto', 'deep': 'duboko', 'Default': 'Zadano', 'Default Base layer?': 'Podrazumijevani bazni sloj', 'Default Height of the map window.': 'Početna visina prozora mape.', 'Default Height of the map window. In Window layout the map maximises to fill the window, so no need to set a large value here.': 'Podrazumijevana visina prozora mape. U rasporedu prozora karta se maksimizira da popuni prozor , nema potrebe da se ovdje postavlja velika vrijednost.', 'Default Location': 'Podrazumijevana lokacija', 'Default Map': 'Osnovna karta', 'Default map question': 'Podrazumijevano pitanje mape', 'Default Marker': 'Zadani Marker', 'Default Realm': 'Zadano carstvo', 'Default Realm = All Entities the User is a Staff Member of': 'Podrazumijevano carstvo = Sve jedinke čiji je korisnik uposlenik', 'Default synchronization policy': 'Uobičajena polica sinhronizacije', 'Default Width of the map window.': 'Početna vrijednost širine prozora mape.', 'Default Width of the map window. In Window layout the map maximises to fill the window, so no need to set a large value here.': 'Podrazumijevana širina prozora mape. U rasporedu prozora karta se maksimizira da popuni prozor , nema potrebe da se ovdje postavlja velika vrijednost.', 'Default?': 'Podrazumijevano?', 'Defaults updated': 'Podrazumijevane vrijednosti ažurirane', 'Defecation area for animals': 'Područje za vršenje nužde za životinje', 'deferred': 'odgođen', 'Define Scenarios for allocation of appropriate Resources (Human, Assets & Facilities).': 'Definirajte scenarije za raspodjelu prikladnih Resursa ( Ljudi ,sredstva i objekti).', 'Defines the icon used for display of features on handheld GPS.': 'Definiše ikonu korištenu za prikaz karakteristika na ručnom GPS uređaju.', 'Defines the icon used for display of features on interactive map & KML exports.': 'Definira ikonu korištenu za prikaz karakteristika na interaktivnoj mapi i KML exportima.', 'Defines the marker used for display & the attributes visible in the popup.': 'Definira marker korišten za prikaz i atribute vidljive u prozoru.', 'Degrees in a latitude must be between -90 to 90.': 'Stepeni u geografskoj dužini moraju biti između -90 to 90.', 'Degrees in a longitude must be between -180 to 180.': 'Stepeni u geografskoj širini moraju biti između -180 to 180.', 'Degrees must be a number between -180 and 180': 'Stepeni moraju biti broj između -180 i 180', 'Degrees must be a number.': 'Stepen mora biti broj', 'Dehydration': 'Dehidracija', 'Delete': 'Brisanje', 'delete': 'brisanje', 'Delete Affiliation': 'Obriši namještenje', 'Delete Aid Request': 'Obriši zahtjev za pomoć', 'Delete Airport': 'Obriši aerodrom', 'delete all checked': 'Izbriši sve provjerene', 'Delete all data of this type which the user has permission to before upload. This is designed for workflows where the data is maintained in an offline spreadsheet and uploaded just for Reads.': 'Obriši sve podatke ovog tipa za koje korisnik ima odobrenje prije postavljanja na server. 
Ovo je dizajnirano za radne tokove gdje se podaci ažuriraju na vanmrežnim tablicama i šalju samo za čitanje', 'Delete Alternative Item': 'Obriši alternativnu stavku', 'Delete Appraisal': 'Obriši ispunjenje', 'Delete Assessment': 'Brisanje Procjene', 'Delete Assessment Summary': 'Obriši sažetak procjene', 'Delete Asset': 'Obriši sredstvo', 'Delete Asset Log Entry': 'Obriši unos u zapisniku sredstava', 'Delete Award': 'Obriši nagradu', 'Delete Base Station': 'Obriši baznu stanicu', 'Delete Baseline': 'Izbriši referentnu tačku', 'Delete Baseline Type': 'Obrišite tip referentne tačke', 'Delete Branch': 'Obriši ogranak', 'Delete Brand': 'Obriši proizvođačku marku', 'Delete Budget': 'Obriši budžet', 'Delete Bundle': 'Izbriši paket', 'Delete Case': 'Obriši slučaj', 'Delete Catalog': 'Obriši katalog', 'Delete Catalog Item': 'Obriši stavku kataloga', 'Delete Certificate': 'Obriši certifikat', 'Delete Certification': 'Obriši certifikat', 'Delete Cluster': 'Obriši grupisanje', 'Delete Cluster Subsector': 'Obriši podsektor skupa', 'Delete Commitment': 'Obriši zaduženje', 'Delete Commitment Item': 'Obriši stavku obaveze', 'Delete Competency': 'Izbriši stručnost', 'Delete Competency Rating': 'Obriši status spremnosti', 'Delete Contact': 'Obriši kontakt', 'Delete Contact Information': 'Obriši informacije o kontaktu', 'Delete Course': 'Obriši kurs', 'Delete Course Certicate': 'Obriši certifikat kursa', 'Delete Course Certificate': 'Obriši certifikat kursa', 'Delete Credential': 'Obriši akreditiv', 'Delete Data from Theme layer': 'Obriši podatke iz tematskog sloja', 'Delete Department': 'Obriši odjeljenje', 'Delete Detail': 'Obriši detalje', 'Delete Distribution': 'Izbriši raspodjelu', 'Delete Distribution Item': 'Obriši distribucijsku stavku', 'Delete Document': 'Obriši dokument', 'Delete Donation': 'Obriši donaciju', 'Delete Donor': 'Obriši donatora', 'Delete Email': 'Obriši e-poštu', 'Delete Entry': 'Obriši unos', 'Delete Event': 'Obriši događaj', 'Delete Event Type': 'Obriši tip događaja', 'Delete Facility': 'Obriši objekat', 'Delete Facility Type': 'Obriši vrstu objekta', 'Delete Feature Class': 'Brisanje klasa karakteristika', 'Delete Feature Layer': 'Obriši sloj karakteristika', 'Delete Find Report': 'Obriši traženi izvještaj', 'Delete from Server?': 'Izbrisati sa servera?', 'Delete GPS data': 'Obriši GPS podatke', 'Delete Group': 'Obriši grupu', 'Delete Hazard': 'Obriši rizik', 'Delete Heliport': 'Obriši heliodrom', 'Delete Home': 'Izbriši dom', 'Delete Hospital': 'Obriši bolnicu', 'Delete Hours': 'Obriši sate', 'Delete Image': 'Obriši sliku', 'Delete Impact': 'Obriši utjecaj', 'Delete Impact Type': 'Izbriši tip utjecaja', 'Delete Incident Report': 'Obriši izvještaj o incidentu', 'Delete Inventory Item': 'Brisanje artikla u skladištu', 'Delete Item': 'Obriši stavku', 'Delete Item Category': 'Obriši kategoriju stavke', 'Delete Item from Request': 'Obriši stavku iz zahtjeva', 'Delete Item Pack': 'Obriši paket stavki', 'Delete Item Packet': 'Obriši paket stavki', 'Delete Job Role': 'Izbriši ulogu posla', 'Delete Job Title': 'Obriši radno mjesto', 'Delete Key': 'Obriši ključ', 'Delete Kit': 'Obriši komplet', 'Delete Layer': 'Obriši sloj', 'Delete Level 1 Assessment': 'Izbriši procjenu nivoa 1', 'Delete Level 2 Assessment': 'Obriši procjenu nivoa 2', 'Delete Location': 'Obriši lokaciju', 'Delete Location Hierarchy': 'Obriši hijerarhiju lokacija', 'Delete Mailing List': 'Obriši listu za slanje poruka', 'Delete Map Profile': 'Obriši konfiguraciju mape', 'Delete Marker': 'Obriši marker', 'Delete 
Membership': 'Obriši članstvo', 'Delete Message': 'Obriši poruku', 'Delete Mission': 'Brisanje misije', 'Delete Morgue': 'Obriši mrtvačnicu', 'Delete Need': 'Obriši potrebu', 'Delete Need Type': 'Obriši tip potrebe', 'Delete Office': 'Obriši kancelariju', 'Delete Office Type': 'Obriši tip kancelarije', 'Delete Order': 'Obriši narudžbu', 'Delete Organization': 'Obriši organizaciju', 'Delete Organization Domain': 'Obriši domenu organizacije', 'Delete Organization Needs': 'Obriši potrebe organizacije', 'Delete Organization Type': 'Obriši tip organizacije', 'Delete Participant': 'Obriši učesnika', 'Delete Partner Organization': 'Obriši partnersku organizaciju', 'Delete Patient': 'Obriši pacijenta', 'Delete Peer': 'Obriši saradnika', 'Delete Person': 'Obriši osobu', 'Delete Photo': 'Obriši fotografiju', 'Delete PoI Type': 'Obriši tačku interesa', 'Delete Point of Interest': 'Obriši tačku interesa', 'Delete Population Statistic': 'Obriši statistiku o populaciji', 'Delete Position': 'Obriši poziciju', 'Delete Post': 'Obriši blok ugradivog teksta', 'Delete Professional Experience': 'Obriši profesionalno iskustvo', 'Delete Program': 'Obriši program', 'Delete Project': 'Obriši projekat', 'Delete Projection': 'Obriši projekciju', 'Delete Rapid Assessment': 'Izbriši brzu procjenu', 'Delete Received Item': 'Izbriši primljenu stavku', 'Delete Received Shipment': 'Obriši primljenu pošiljku', 'Delete Record': 'Obriši zapis', 'Delete Recovery Report': 'Obriši izvještaj o pronalaženju', 'Delete Region': 'Obriši područje', 'Delete Relative': 'Obriši srodnika', 'Delete Report': 'Obriši izvještaj', 'Delete Request': 'Obriši zahtjev', 'Delete Request Item': 'Izbiši stavku zahtjeva', 'Delete Request Template': 'Obriši predložak zahtjeva', 'Delete Resource': 'Obriši resurs', 'Delete Resource Type': 'Obriši tip resursa', 'Delete Role': 'Obriši ulogu', 'Delete Room': 'Obriši sobu', 'Delete saved search': 'Obriši snimljenu pretragu', 'Delete Scenario': 'Obriši scenarij', 'Delete Seaport': 'Obriši luku', 'Delete Section': 'Obriši sekciju', 'Delete Sector': 'Obriši sektor', 'Delete Sent Item': 'Izbriši poslani predmet', 'Delete Sent Shipment': 'Obriši poslanu pošiljku', 'Delete Service': 'Obriši uslugu', 'Delete Service Profile': 'Obriši profil usluge', 'Delete Setting': 'Uklonite postavke', 'Delete Shipment Item': 'Obriši predmet pošiljke', 'Delete Site Needs': 'Obriši potrebe mjesta', 'Delete Skill': 'Obriši vještinu', 'Delete Skill Equivalence': 'Obriši ekvivalenciju vještine', 'Delete Skill Provision': 'Obriši pružanje vještina', 'Delete Skill Type': 'Obriši tip vještine', 'Delete SMS': 'Obriši SMS', 'Delete Staff Assignment': 'Obriši dodjelu osoblja', 'Delete Staff Member': 'Obriši člana osoblja', 'Delete Staff Type': 'Izbriši tip osoblja', 'Delete Status': 'Obriši status', 'Delete Stock Adjustment': 'Obriši prilagođenje zalihe', 'Delete Stock Count': 'Obriši broj zaliha', 'Delete Subscription': 'Izbriši pretplatu', 'Delete Subsector': 'Izbriši podsektor', 'Delete Supplier': 'Obriši dobavljača', 'Delete Survey Answer': 'Izbriši anketni odgovor', 'Delete Survey Question': 'Izbriši anketno pitanje', 'Delete Survey Section': 'Obriši anketnu sekciju', 'Delete Survey Series': 'Izbriši niz pregleda', 'Delete Survey Template': 'Obrišite šablon ankete', 'Delete Symbology': 'Obriši značenje simbola', 'Delete Theme': 'Obriši temu', 'Delete this Assessment Answer': 'Obriši ovaj odgovor ocjene', 'Delete this Assessment Question': 'Obriši ovo pitanje ocjene', 'Delete this Assessment Template': 'Obriši ovaj predložak ocjene', 
'Delete this Completed Assessment Form': 'Obriši ovaj formular za završenu procjenu', 'Delete this Disaster Assessment': 'Obriši ovu procjenu katastrofe', 'Delete this Filter': 'Obriši filter', 'Delete this Question Meta-Data': 'Obriši ove metapodatke pitanja', 'Delete this Template Section': 'Obriši ovaj odjeljak predloška', 'Delete Tour': 'Obriši turu', 'Delete Training': 'Obriši obuku', 'Delete Training Event': 'Obriši događaj obuke', 'Delete Tweet': 'Obriši tweet', 'Delete Unit': 'Obriši jedinicu', 'Delete User': 'Obriši korisnika', 'Delete Vehicle': 'Obriši vozilo', 'Delete Vehicle Details': 'Obriši detalje o vozilu', 'Delete Vehicle Type': 'Obriši vrstu vozila', 'Delete Volunteer': 'Obriši volontera', 'Delete Volunteer Cluster': 'Obriši skup volontera', 'Delete Volunteer Cluster Position': 'Obriši poziciju skupa volontera', 'Delete Volunteer Cluster Type': 'Obriši tip skup volontera', 'Delete Volunteer Role': 'Obriši ulogu volontera', 'Delete Warehouse': 'Obriši skladište', 'Delete Warehouse Item': 'Obriši stavku skladišta', 'Delete:': 'Obriši:', 'deleted': 'obrisano', 'Deliver To': 'Isporuka za', 'Delivered By': 'Isporučio', 'Delivered To': 'Isporučeno na', 'Delphi Decision Maker': 'Delphi stvaralac odluka', 'Delphi toma de decisiones': 'Delphi stvaralac odluka', 'Demographic': 'Demografski', 'Demographics': 'Demografija', 'Demonstrations': 'Demonstracije', 'denied': 'odbijeno', 'Dental Examination': 'Pregled zuba', 'Dental Profile': 'Zubni Profil', 'Department / Unit': 'Odjel/Jedinica', 'Department added': 'Odjel dodan', 'Department Catalog': 'Katalog odjeljenja', 'Department deleted': 'Odjel obrisan', 'Department Details': 'Detalji odjeljenja', 'Department updated': 'Odjel ažuriran', 'Deployed': 'Dodijeljeno', 'Deployment': 'Isporuka', 'Deployment Alert': 'Upozorenje o dostavi', 'Deployment Location': 'Lokacija Razvrstavanja', 'Deployment Request': 'Zahtjev za dostavu', 'Describe the condition of the roads from/to the facility.': 'Opišite stanje puteva od/do ovog objekta', 'Describe the condition of the roads to your hospital.': 'Opišite stanje ceste prema Vašoj bolnici.', "Describe the procedure which this record relates to (e.g. 'medical examination')": "Opisati proceduru na koju se odnosi ovaj zapis (npr. 
'medicinsko ispitivanje')", 'Description': 'Opis', 'description': 'opis', 'Description of Bin Type': 'Opis korpe za smještaj', 'Description of Contacts': 'Opis kontakta', 'Description of defecation area': 'Opis područja za vršenje nužde', 'Description of drinking water source': 'Opis izvora pitke vode', 'Description of perimeter fencing, security guards, security lighting.': 'Opis veličine ograde, stražara, sigurnosnih svjetala.', 'Description of sanitary water source': 'Opis sanitarnih izvora vode', 'Description of water source before the disaster': 'Opis vodenih izvora prije katastrofe', 'Description:': 'Opis:', 'Descriptive Text (e.g., Prose, etc)': 'Opisni tekst', 'design': 'dizajn', 'Designated for': 'Dizajnirano za', 'Desire to remain with family': 'Želja da se ostane sa porodicom', 'Destination': 'Odredište', 'Destroyed': 'Uništen', 'Detail': 'Detalji', 'Detail added': 'Detalj dodan', 'Detail deleted': 'Detalj obrisan', 'Detail updated': 'Detalj ažuriran', 'Detailed Description/URL': 'Detaljan opis /URL', 'Details': 'Detalji', 'Details field is required!': 'Polje detalji je obavezno', 'Details of Disaster Assessment': 'Detalji procjene katastrofe', 'Details of each question in the Template': 'Detalji svakog pitanja u predlošku', 'Dialysis': 'Dijaliza', 'Diaphragms, horizontal bracing': 'Diafragma, horizontalno učvršćenje', 'Diarrhea': 'Proljev', 'Diarrhea among children under 5': 'Dijareja među djecom mlađom od 5 godina', 'Dignitary Visit': 'Posjeta funkcionera', 'Direction': 'Smijer', 'Disabilities': 'Invaliditeti', 'Disable': 'Onemogući', 'Disabled': 'Onemogućeno', 'Disabled participating in coping activities': 'Učestvovanje osoba sa posebnim potrebama u aktivnostima za suočavanje sa stresom', 'Disabled?': 'Osoba sa invaliditetom?', 'Disaster': 'Katastrofa', 'Disaster Assessment added': 'Procjena katastrofe dodana', 'Disaster Assessment Chart': 'Dijagram procjene katastrofe', 'Disaster Assessment deleted': 'Procjena katastrofe obrisana', 'Disaster Assessment Map': 'Mapa procjene katastrofe', 'Disaster Assessment Summary': 'Rezime procjene katastrofe', 'Disaster Assessment updated': 'Procjena katastrofe ažurirana', 'Disaster Assessments': 'Procjene katastrofe', 'Disaster clean-up/repairs': 'Čišćenje/opravka od nepogode', 'Disaster Victim Identification': 'Identifikacija žrtava nesreće', 'Disaster Victim Registry': 'Registar žrtava katastrofe', 'Discharge (cusecs)': 'Pražnjenje (kubni metar po sekundi)', 'Discharges/24hrs': 'Istovari/24 sata', 'Discussion Forum': 'Forum za diskusiju', 'Discussion Forum on item': 'Forum za rasprave na određenu temu', 'Disease vectors': 'Vektori bolesti', 'diseased': 'bolesni', 'Disk Cache Keys': 'Disk cache ključevi', 'Disk Cleared': 'Disk očišćen', 'Dispatch': 'Isporuči', 'Dispatch Time': 'Vrijeme isporuke', 'Dispensary': 'Dispanzer', 'displaced': 'raseljeni', 'Displaced': 'Raseljen', 'Displaced Populations': 'Raseljeno stanovništvo', 'Display Chart': 'Prikaži dijagram', 'Display name': 'Ime za prikaz', 'Display Polygons?': 'Prikaži poligone?', 'Display Question on Map': 'Prikaži pitanje na karti.', 'Display Routes?': 'Prikazati rute?', 'Display Selected Questions': 'Prikaži izabrana pitanja', 'Display Tracks?': 'Prikaži tragove?', 'Display Waypoints?': 'Prikazati Putne tačke?', 'Dispose': 'Raspoloživ', 'Distance between defecation area and water source': 'Udaljenost između površina gdje se obavlja nužda i izvora vode', 'Distance between latrines and temporary shelter in meters': 'Udaljenost između površina gdje se obavlja nužda i privremenog 
skloništa', 'Distance between shelter and latrines': 'Udaljenost između skloništa i zahoda', 'Distance from %s:': 'Razdaljina od %s:', 'Distance(Kms)': 'Udaljenost(kilometri)', 'Distributed without Record': 'Raspodjeljeno bez zapisa', 'Distribution': 'Raspodjela', 'Distribution Added': 'Dodata raspodjela', 'Distribution Deleted': 'Raspodjela izbrisana', 'Distribution Details': 'Detalji raspodjele', 'Distribution Groups': 'Distribucijske grupe', 'Distribution groups': 'Distribucijske grupe', 'Distribution Item': 'Distribucijska stavka', 'Distribution Item Added': 'Dodata distribucijska stavka', 'Distribution Item Deleted': 'Stavka raspodjele je obrisana.', 'Distribution Item Details': 'Detalji stavke distribucije', 'Distribution Item Updated': 'Stavka raspodjele je ažurirana', 'Distribution Items': 'Raspodjela stavki', 'Distribution Report': 'Izvještaj raspodjele', 'Distribution Updated': 'Raspodjela ažurirana', 'Distributions': 'Raspodjele', 'District': 'Distrikt', 'divorced': 'razveden', 'Djibouti': 'Džibuti', 'DM Planning': 'DM planiranje', 'DNA Profile': 'DNA profil', 'DNA Profiling': 'Prikaz profila preko DNA', 'Do households have appropriate equipment and materials to cook their food (stove, pots, dished plates, and a mug/drinking vessel, etc)?': 'Da li domaćinstva imaju odgovarajuću opremu i materijale da kuhaju svoju hranu (štednjak, lonci, tanjir, šolje / posude za piće, itd)?', 'Do households have bedding materials available (tarps, plastic mats, blankets)?': 'Da li domaćinstva imaju odgovarajući materijal za ležaje (prekrivače, madrace)?', 'Do households have household water storage containers?': 'Da li domaćinstva imaju spremnike za vodu?', 'Do women and girls have easy access to sanitary materials?': 'Da li žene i djevojke imaju lak pristup sanitarnim materijalima?', 'Do you have access to cash to restart your business?': 'Imate li pristup novcu da ponovo započnete poslovne aktivnosti?', 'Do you know of any incidents of violence?': 'Znate li slučajeve nasilja?', 'Do you know of children living on their own (without adults)?': 'Poznajete li djecu koja žive sama (bez staratelja)?', 'Do you know of children separated from their parents or caregivers?': 'Da li poznajete djecu odvojenu od svojih roditelja/staratelja?', 'Do you know of children that have been sent to safe places?': 'Poznajete li djecu koja su poslana na sigurna mjesta?', 'Do you know of children that have disappeared without explanation in the period since the disaster?': 'Poznajete li djecu koja su nestala bez objašnjenja u periodu nakon katastrofe?', 'Do you know of parents/caregivers missing children?': 'Poznajete li roditelje/Staratelje djece koja su nestala?', 'Do you prefer': 'Da li više volite', 'Do you really want to approve this record?': 'Želite li zaista potvrditi ovaj zapis?', 'Do you really want to delete these records?': 'Da li zaista želite obrisati ove zapise?', 'Do you really want to delete this record? (This action can not be reversed)': 'Želite li zaista obrisati ovaj zapis? (akcija se ne može vratiti)', 'Do you want to cancel this received shipment? The items will be removed from the Inventory. This action CANNOT be undone!': 'Želite li otkazati ovu primljenu pošiljku? Predmeti će biti uklonjeni iz inventara. Ova akcija NE MOŽE biti poništena!', 'Do you want to cancel this sent shipment? The items will be returned to the Inventory. This action CANNOT be undone!': 'Da li želite otkazati ovu pošiljku? Artikal će biti vraćen u inventar. 
Ovo se NE MOŽE poništiti!', 'Do you want to cancel this sent shipment? The items will be returned to the Warehouse. This action CANNOT be undone!': 'Da li želite otkazati ovu pošiljku? Artikli će biti vraćeni u skladište. Ovo se NE MOŽE poništiti!', 'Do you want to commit to this request?': 'Želite li zaista potvrditi ovaj zahtjev?', 'Do you want to complete & close this adjustment?': 'Želite li završiti i zatvoriti ovo prilagođenje?', 'Do you want to complete the return process?': 'Želite li završiti proces vraćanja.', 'Do you want to over-write the file metadata with new default values?': 'Želite li prebrisati metapodatke datoteke s novim podrazumijevanim vrijednostima?', 'Do you want to receive this shipment?': 'Da li želite primiti ovu pošiljku?', 'Do you want to send these Committed items?': 'Da li želite poslati ove izvršene stavke?', 'Do you want to send this shipment?': 'Da li želite poslati ovu pošiljku?', 'Document': 'Dokument', 'Document added': 'Dokumenti dodani', 'Document deleted': 'Dokument obrisan', 'Document Details': 'Detalji dokumenta', 'Document removed': 'Dokument uklonjen', 'Document Scan': 'Skeniranje dokumenta', 'Document updated': 'Dokumenti ažurirani', 'Document:': 'Dokument:', 'Documents': 'Dokumenti', 'Documents and Images': 'Dokumenti i slike', 'Documents and Photos': 'Dokumenti i slike', 'Does this facility provide a cholera treatment center?': 'Da li ovaj objekat pruža tretman prilikom kolere?', 'Doing nothing (no structured activity)': 'Ne radeći ništa ( nema struktuirane aktivnosti )', 'Dollars': 'Dolari', 'Domain': 'Domena', 'Domestic chores': 'Domaći poslovi', 'Dominica': 'Dominika', 'Dominican Republic': 'Dominikanska Republika', "Don't Know": 'Ne znam', 'DONATE': 'DONACIJA', 'Donate to this Request': 'Doniraj zahtjev', 'Donated': 'Donirano', 'Donating Organization': 'Organizacija donatora', 'Donation': 'Donacija', 'Donation Added': 'Donacija dodana', 'Donation Canceled': 'Donacija otkazana', 'Donation Certificate': 'Certifikat o donaciji', 'Donation Details': 'Detalji donacije', 'Donation Phone #': 'telefon za donacije #', 'Donation Updated': 'Donacija ažurirana', 'Donations': 'Donacije', 'done!': 'učinjeno!', 'Donor': 'Donator', 'Donor added': 'Donator dodan', 'Donor deleted': 'Donator obrisan', 'Donor Details': 'Detalji Donatora', 'Donor updated': 'Donator ažuriran', 'Donors': 'donatori', 'Donors Report': 'Izvještaj davaoca', 'Doolie Transportation Ambulance': 'Doolie prevozna kola hitne pomoći', 'Door frame': 'Okvir od vrata', 'Download': 'Preuzmi', 'Download Assessment Form Document': 'Preuzmi formular procjene kao dokument', 'Download Assessment Form Spreadsheet': 'Preuzmi formular procjene kao tablicu', 'Download last build': 'Preuzmi samo posljednje kompajliranje', 'Download OCR-able PDF Form': 'Preuzmi OCR čitljiv PDF Formular', 'Download PDF': 'Preuzmite PDF', 'Download Template': 'Preuzimanje šablona', 'Draft': 'Nacrt', 'Draft Features': 'Nacrt objekata', 'Drag an image below to crop and scale it before uploading it:': 'Povucite sliku ispod da je izrežete i promijenite joj veličinu prije postavljanja.', 'Drainage': 'Drenaža', 'Draw on Map': 'Prikaži na karti', 'Drawing up a Budget for Staff & Equipment across various Locations.': 'Izrada nacrta budžeta za osoblje i opremu na različitim lokacijama.', 'Drill Down by Group': 'Dublja analiza po grupi', 'Drill Down by Incident': 'Dublja analizira po incidentu', 'Drill Down by Shelter': 'Dublja analiza po skloništu', 'Driver Phone Number': 'Telefonski broj vozača', 'Drivers': 'Drajveri', 'Driving 
License': 'Vozačka dozvola', 'Drop-off Location for Goods?': 'Lokacija za ostavljanje robe?', 'Drought': 'Suša', 'DRRPP Extensions': 'DRRPP proširenja', 'Drugs': 'Lijekovi', 'Dry Dock': 'Suho sidrište', 'Due %(date)s': 'Rok %(date)s', 'Dug Well': 'Iskopani bunar', 'Dump': 'Izdvajanje', 'Duplicate': 'Dupliciraj', 'duplicate': 'duplikat', 'Duplicate Locations': 'Dupliraj lokacije', 'Duplicate?': 'Napraviti kopiju?', 'Duration': 'Trajanje', 'Duration (months)': 'Trajanje (mjeseci)', 'Dust Storm': 'Prašnjava oluja', 'DVI Navigator': 'DVI Navigator', 'Dwelling': 'Stambeni', 'Dwellings': 'Stambene jedinice', 'dyed': 'umrli', 'E-mail': 'E-pošta', 'Early Recovery': 'Rani oporavak', 'Early warning': 'Rano upozorenje', 'Ears, angle': 'Uši, ugao', 'Ears, size': 'Uši, veličina', 'Earth Enabled?': 'Zemlja uključena?', 'Earthquake': 'Zemljotres', 'East Timor': 'Istočni Timor', 'Easy access to sanitation items for women/girls': 'Lak pristup sanitarnim predmetima za žene/djevojke', 'Ecuador': 'Ekvador', 'Edit': 'Izmijeni', 'edit': 'uredi', 'Edit %(site_label)s Status': 'Uredi %(site_label)s status', 'Edit %(type)s': 'Uredi %(type)s', "Edit 'More Info'": 'Uredi dodatne podatke', 'Edit a Missing Person': 'Uredi nestalu osobu', 'Edit Activity': 'Uredi aktivnost', 'Edit Activity Organization': 'Uredi organizaciju', 'Edit Activity Type': 'Uredi tip aktivnosti', 'Edit Address': 'Uredi adresu', 'Edit Adjustment': 'Uredi podešavanja', 'Edit Affiliation': 'Uredi namještenje', 'Edit Airport': 'Uredi aerodrom', 'Edit Alternative Item': 'Uredi alternativnu stavku', 'Edit Annual Budget': 'Uredi godišnji budžet', 'Edit Application': 'Uredi aplikaciju', 'Edit Appraisal': 'Uredi ispunjenje', 'Edit Assessment': 'Uredi procjenu', 'Edit Assessment Answer': 'Uredi odgovor ocjene', 'Edit Assessment Question': 'Uredi pitanje ocjene', 'Edit Assessment Summary': 'Izmjena sažetka procjene', 'Edit Assessment Template': 'Uredi predložak ocjene', 'Edit Asset': 'Uredi sredstvo', 'Edit Asset Log Entry': 'Uredi stavku zapisnika o sredstvima', 'Edit Award': 'Uredi nagradu', 'Edit Base Station': 'Uredi baznu stanicu', 'Edit Baseline': 'Uredi referentnu tačku', 'Edit Baseline Type': 'Uredi Tip Referentne tačke', 'Edit Beneficiaries': 'Uredi korisnika', 'Edit Beneficiary Type': 'Uredi tip korisnika', 'Edit Branch Organization': 'Uredi ogranak organizacije', 'Edit Brand': 'Uredi proizvođačku marku', 'Edit Budget': 'Promjeni budžet', 'Edit Bundle': 'Promjeni paket', 'Edit Camp': 'Uredi kamp', 'Edit Camp Service': 'Uredi uslugu kampa', 'Edit Camp Status': 'Uredi status kampa', 'Edit Camp Type': 'Uredi tip kampa', 'Edit Campaign': 'Uredi kampanju', 'Edit Campaign Message': 'Uredi poruku kampanje', 'Edit Case': 'Uredi slučaj', 'Edit Catalog': 'Uredi katalog', 'Edit Catalog Item': 'Uredi stavku kataloga', 'Edit Certificate': 'Uredi certifikat', 'Edit Certification': 'Uredi certifikaciju', 'Edit Cluster': 'Uredi grupisanje', 'Edit Cluster Subsector': 'Uredi podsektor skupa', 'Edit Commitment': 'Uredi zaduženje', 'Edit Commitment Item': 'Uredi stavku zaduženja', 'Edit Committed People': 'Uredi zadužene ljude', 'Edit Committed Person': 'Uredi zaduženu osobu', 'Edit Community Details': 'Uredi podatke zajednice', 'Edit Competency': 'Uredi kompetentnost', 'Edit Competency Rating': 'Uredi status spremnosti', 'Edit Completed Assessment Form': 'Uredi završen formular ocjene', 'Edit Config': 'Izmijeni konfiguraciju', 'Edit Contact': 'Uredi kontakt', 'Edit Contact Details': 'Uredi detalje kontakta', 'Edit Contact Information': 'Uredi informacije o 
kontaktu', 'Edit Contents': 'Uredi sadržaj', 'Edit Course': 'Uredi kurs', 'Edit Course Certicate': 'Uredi certifikat za tečaj', 'Edit Course Certificate': 'Uredi certifikat kursa', 'Edit Credential': 'Uredi akreditiv', 'Edit current record': 'Uredi trenutni zapis', 'Edit Dead Body Details': 'Uredi detalje izvještaja o mrtvim tijelima', 'Edit Department': 'Uredi odjeljenje', 'Edit Description': 'Uredi opis', 'Edit Details': 'Uredi detalje', 'Edit Disaster Victims': 'Uredi žrtve katastrofe', 'Edit Distribution': 'Uredi raspodjelu', 'Edit Distribution Item': 'Uredi stavku raspodjele', 'Edit Document': 'Uredi dokument', 'Edit Donation': 'Uredi donaciju', 'Edit Donor': 'Uredi donatora', 'Edit DRRPP Extensions': 'Uredi DRRPP proširenja', 'Edit Education Details': 'Uredi podatke o obrazovanju', 'Edit Education Level': 'Uredi nivo obrazovanja', 'Edit Email': 'Izmijeni e-mail', 'Edit Email Settings': 'Uredi postavke Email-a', 'Edit Entry': 'Uredi unos', 'Edit Event': 'Uredi događaj', 'Edit Event Type': 'Uredi tip događaja', 'Edit Experience': 'Uredi iskustvo', 'Edit Facility': 'Uredi objekat', 'Edit Facility Type': 'Uredi vrstu objekta', 'Edit Feature Class': 'Uredi klasu karakteristika', 'Edit Feature Layer': 'Uredi sloj karakteristika', 'Edit Flood Report': 'Uređivanje izvještaja o poplavi', 'Edit Gateway Settings': 'uredi postavke gatewy-a', 'Edit GPS data': 'Uredi GPS podatke', 'Edit Group': 'Uredi grupu', 'Edit Hazard': 'Uredi rizik', 'Edit Heliport': 'Uredi heliodrom', 'Edit Home': 'Uredi kuću', 'Edit Hospital': 'Uredi bolnicu', 'Edit Hours': 'Uredi sate', 'Edit Human Resource': 'Uredi ljudske resurse', 'Edit Identification Report': 'Uredi izvještaj o identifikacijama', 'Edit Identity': 'Uredi identitet', 'Edit Image Details': 'Uredi detalje slike', 'Edit Impact': 'Uredi utjecaj', 'Edit Impact Type': 'Uredi tip utjecaja', 'Edit Import File': 'Uredi uvezeni fajl', 'Edit Incident': 'Uredi incident', 'Edit Incident Report': 'Uredi izvještaj o incidentu', 'Edit Incident Type': 'Uredi tip incidenta', 'Edit Inventory Item': 'Uredi stavku zalihe ', 'Edit Item': 'Uredi stavku', 'Edit Item Catalog': 'Izmijeni stavku u katalogu', 'Edit Item Catalog Categories': 'Uredi kategorije stavki kataloga', 'Edit Item Category': 'Uredi kategoriju stavke', 'Edit Item in Request': 'Uredi stavku u zahtjevu', 'Edit Item Pack': 'Uredi paket stavki', 'Edit Item Packet': 'Uredi paket stavki', 'Edit Item Sub-Categories': 'Uredi podkategorije stavki', 'Edit Job': 'Uredi posao', 'Edit Job Role': 'Uredi opis posla', 'Edit Job Title': 'Uredi radno mjesto', 'Edit Key': 'Uredi ključ', 'Edit Keyword': 'Uredi ključnu riječ', 'Edit Kit': 'Uredi komplet', 'Edit L4': 'Da li urediti lokacije nivoa 4?', 'Edit L5': 'Da li urediti lokacije nivoa 4?', 'Edit Layer': 'Uredi sloj', 'Edit Level %d Locations?': 'Da li urediti lokacije nivoa %d ?', 'Edit Level 1 Assessment': 'Editovanje procjena Nivoa 1', 'Edit Level 2 Assessment': 'Izmjeni procjenu 2. 
nivoa', 'Edit Location': 'Uredi lokaciju', 'Edit Location Details': 'Uredi detalje lokacije', 'Edit Location Hierarchy': 'Uredi hijerarhiju lokacija', 'Edit Log Entry': 'Uredi unos zapisnika', 'Edit Logged Time': 'Uredi stavku zapisnika', 'Edit Mailing List': 'Uredi listu za slanje poruka', 'Edit Map Profile': 'Uredi konfiguraciju mape', 'Edit Map Services': 'Uredi usluge mape', 'Edit Marker': 'Uredi marker', 'Edit Membership': 'Uredi članstvo', 'Edit Message': 'Uredi poruku', 'Edit message': 'Uredi poruku', 'Edit Messaging Settings': 'Uredite postavke poruka', 'Edit Metadata': 'Uredi metapodatke', 'Edit Milestone': 'Uredi prekretnicu', 'Edit Mission': 'Izmjeni misiju', 'Edit Mobile Commons Settings': 'Uredi mobilne postavke', 'Edit Modem Settings': 'Uredi postavke modema', 'Edit Need': 'Uredi potrebu', 'Edit Need Type': 'Uredi tip potrebe', 'Edit Network': 'Uredi mrežu', 'Edit Note': 'Uredi napomenu', 'Edit Office': 'Uredi kancelariju', 'Edit Office Type': 'Uredi tip kancelarije', 'Edit Options': 'Izmjeni opcije', 'Edit Order': 'Uredi narudžbu', 'Edit Organization': 'Uredi organizaciju', 'Edit Organization Domain': 'Uredi domen organizacije', 'Edit Organization Needs': 'Uredi potrebe organizacije', 'Edit Organization Type': 'Uredi tip organizacije', 'Edit Output': 'Uredi izlaz', 'Edit Page': 'Uredi stranicu', 'Edit Parameters': 'Uredi parametre', 'Edit Parser Connection': 'Uredi parsersku konekciju', 'Edit Participant': 'Uredi učesnika', 'Edit Partner': 'Uredi partnera', 'Edit Partner Organization': 'Uredi partnersku organizaciju', 'Edit Patient': 'Uredi pacijenta', 'Edit Peer': 'Uredi suradnika', 'Edit Peer Details': 'Promjena detalja saradnika', 'Edit Permissions for %(role)s': 'Uredi dopuštenja %(role)s', 'Edit Person': 'Uredi osobu', 'Edit Person Details': 'Uredi detalje osobe', "Edit Person's Details": 'Uredi detalje o osobi', 'Edit Personal Effects Details': 'Uredi detalje ličnih uticaja', 'Edit Photo': 'Uredi fotografiju', 'Edit Pledge': 'Uredi podršku', 'Edit PoI Type': 'Uredi tačku interesa', 'Edit Point of Interest': 'Uredi tačku interesa', 'Edit Policy or Strategy': 'Uredi politiku ili strategiju', 'Edit Population Statistic': 'Izmjeni statistiku stanovništva', 'Edit Position': 'Uredi poziciju', 'Edit Post': 'Uredi blok ugradivog teksta', 'Edit Problem': 'Uredi problem', 'Edit Professional Experience': 'Uredi profesionalno iskustvo', 'Edit Profile': 'Izmijeni profil', 'Edit Profile Configuration': 'Uredi konfiguraciju profila', 'Edit Program': 'Uredi program', 'Edit Project': 'Uredi projekat', 'Edit Project Organization': 'Uredi organizaciju projekta', 'Edit Projection': 'Uredi projekciju', 'Edit Question Meta-Data': 'Uredi metapodatke pitanja', 'Edit Rapid Assessment': 'Uredi brzu procjenu', 'Edit Received Item': 'Uredi primljeni predmet', 'Edit Received Shipment': 'Uredite primljenu pošiljku', 'Edit Record': 'Uredi zapis', 'Edit Recovery Details': 'Izmijeni detalje pronalaženja', 'Edit Region': 'Uredi područje', 'Edit Registration': 'Uredi registraciju', 'Edit Registration Details': 'uredi detalje registracije', 'Edit Relative': 'Uredi srodnike', 'Edit Relief Item': 'Uredi stavku pomoći', 'Edit Repository Configuration': 'Uredi konfiguraciju repozitorija', 'Edit Request': 'Uredi zahtjev', 'Edit Request Details': 'Zatraži detalje o zahtjevu', 'Edit Request Item': 'Izmjeni stavku zahtjeva', 'Edit Request Template': 'Uredi predložak zahtjeva', 'Edit Requested Skill': 'Uredi tražene vještine', 'Edit Resource': 'Uredi resurs', 'Edit Resource Configuration': 'Uredi konfiguraciju 
resursa', 'Edit Resource Type': 'Uredi tip resursa', 'Edit Response Summary': 'Uredi sumarni odgovor', 'Edit River': 'Izmjeni rijeku', 'Edit Role': 'Uredi ulogu', 'Edit roles for': 'Uredi uloge za', 'Edit Room': 'Uredi sobu', 'Edit RSS Settings': 'Uredi RSS Postavke', 'Edit saved search': 'Uredi sačuvanu pretragu', 'Edit Scenario': 'Izmijeni scenarij', 'Edit School District': 'Uredi školski rejon', 'Edit School Report': 'Izmijeni školski izvještaj', 'Edit Seaport': 'Uredi luku', 'Edit Sector': 'Uredi sektor', 'Edit Sender Priority': 'Uredi prioritet pošiljaoca', 'Edit Sent Item': 'Uredi poslani predmet', 'Edit Series': 'Uredi seriju', 'Edit Service': 'Uredi uslugu', 'Edit Setting': 'Uredi postavke', 'Edit Settings': 'Izmjeni postavke', 'Edit Shelter': 'Uredi sklonište', 'Edit Shelter Service': 'Uredi uslugu skloništa', 'Edit Shelter Status': 'Uredi status skloništa', 'Edit Shelter Type': 'Uredi tip skloništa', 'Edit Shipment Item': 'Uredi predmet pošiljke', 'Edit Shipment to Send': 'Uredi pošiljku za slanje', 'Edit Site Needs': 'Uredi potrebe mjesta', 'Edit Skill': 'Uredi vještinu', 'Edit Skill Equivalence': 'Uredi ekvivalenciju vještina', 'Edit Skill Provision': 'Uredi pružanje vještine', 'Edit Skill Type': 'Uredi tip vještine', 'Edit SMS': 'Uredi SMS', 'Edit SMS Outbound Gateway': 'Uredi SMS izlaz', 'Edit SMS Settings': 'Uredi SMS postavke', 'Edit SMTP to SMS Settings': 'Uredi SMTP-SMS postavke', 'Edit Solution': 'Uredi rješenja', 'Edit Source': 'Uredi izvor', 'Edit Staff': 'Izmijeni osoblje', 'Edit Staff Assignment': 'Uredi dodjelu osoblja', 'Edit Staff Member Details': 'Uredi detalje člana osoblja', 'Edit Staff Type': 'Izmijeni tip osoblja', 'Edit Status': 'Uredi status', 'Edit Status Report': 'Uredi statusni izvještaj', 'Edit Stock Count': 'Uredi zalihu skladišta', 'Edit Storage Bins': 'Uredi korpe za smještaj', 'Edit Storage Location': 'Uredi lokacije skladišta', 'Edit Subscription': 'Uredi pretplatu', 'Edit Subsector': 'Uredi podsektor', 'Edit Supplier': 'Uredi dobavljača', 'Edit Survey Answer': 'Uredi odgovor ankete', 'Edit Survey Question': 'Uredi pitanja upitnika', 'Edit Survey Series': 'Uredi niz anketa', 'Edit Survey Template': 'Uredi šablon za anketu', 'Edit Symbology': 'Uredi značenje simbola', 'Edit Sync Settings': 'Izmijeni postavke sinhronizacije', 'Edit Synchronization Settings': 'Uredi Postavke sinhronizacije', 'Edit Tag': 'Uredi oznaku', 'Edit Task': 'Uredi zadatak', 'Edit Team': 'Uredi tim', 'Edit Template Section': 'Uredi odjeljak predloška', 'Edit the OpenStreetMap data for this area': 'Uredi OpenStreetMap podatke za ovo područje', 'Edit Theme': 'Uredi temu', 'Edit Theme Data': 'Uredi podatke teme', 'Edit Themes': 'Uredi teme', 'Edit this Disaster Assessment': 'Uredi ovu procjenu katastrofe', 'Edit this entry': 'Uredi ovaj unos', 'Edit Ticket': 'Uredi karticu', 'Edit Tour': 'Uredi turu', 'Edit Track': 'Uredi praćenje', 'Edit Training': 'Uredi obuku', 'Edit Training Event': 'Uredi događaj obuke', 'Edit Tropo Settings': 'Uredi Tropo postavke', 'Edit Twilio Settings': 'Uredi Twilio postavke', 'Edit Twitter account': 'Uredi twitter nalog', 'Edit Twitter Search Query': 'Uredi Twitter upit za pretragu', 'Edit Unit': 'Uredi jedinicu', 'Edit User': 'Uredi korisnika', 'Edit Vehicle': 'Uredi vozilo', 'Edit Vehicle Assignment': 'Uredi dodjelu vozila', 'Edit Vehicle Details': 'Uredi detalje o vozilu', 'Edit Vehicle Type': 'Uredi tip vozila', 'Edit Volunteer Availability': 'Uredi Dostupnost Volontera', 'Edit Volunteer Cluster': 'Uredi skup volontera', 'Edit Volunteer Cluster 
Position': 'Uredi poziciju skupa volontera', 'Edit Volunteer Cluster Type': 'Uredi tip skupa volontera', 'Edit Volunteer Details': 'Uredi detalje volontera', 'Edit Volunteer Role': 'Uredi ulogu volontera', 'Edit Warehouse': 'Uredi skladište', 'Edit Warehouse Item': 'Uredi stavku skladišta', 'Edit Warehouse Stock': 'Uredi zalihu skladišta', 'Edit Web API Settings': 'Uredi Web API postavke', 'Editable?': 'Izmjenjivo?', 'editor': 'uređivač', 'Education': 'Obrazovanje', 'Education Details': 'Detalji o obrazovanju', 'Education details added': 'Detalji o obrazovanju dodani', 'Education details deleted': 'Detalji o obrazovanju obrisani', 'Education details updated': 'Detalji o obrazovanju ažurirani', 'Education Level': 'Nivo obrazovanja', 'Education Level added': 'Nivo obrazovanja dodan', 'Education Level deleted': 'Nivo obrazovanja obrisan', 'Education Level updated': 'Nivo obrazovanja ažuriran', 'Education Levels': 'Nivoi obrazovanja', 'Education materials received': 'Primljeni obrazovni materijali', 'Education materials, source': 'Edukacijski materijal, izvor', 'Effects Inventory': 'Popis efekata', 'Effort Report': 'Izvještaj o uloženom radu', 'eg. gas, electricity, water': 'npr. gas, struja, voda', 'Eggs': 'Jaja', 'Egypt': 'Egipat', 'Either a shelter or a location must be specified': 'Sklonište ili lokacija moraju biti specificirani', 'Either file upload or document URL required.': 'Ili prijenos datoteka ili URL dokumenta potreban.', 'Either file upload or image URL required.': 'Upload-ujte file ili URL zadane slike', 'Elderly person headed households (>60 yrs)': 'Domaćinstva vođena od strane starijih osoba (>60 yrs)', 'Electrical': 'Električno', 'Electrical, gas, sewerage, water, hazmats': 'Električni, plinski, kanalizacioni, vodeni, zaštita', 'Elevated': 'Uzdignuto', 'Elevators': 'Liftovi', 'Email': 'Elektronska pošta', 'Email (Inbound)': 'Elektronska pošta (dolazna)', 'Email Account deleted': 'Nalog elektronske pošte obrisan', 'Email Accounts': 'Nalozi e-pošte', 'Email Address': 'Email adresa', 'Email Address to which to send SMS messages. Assumes sending to phonenumber@address': 'Email adresa na koju treba poslati SMS poruke. 
Pretpostavlja se slanje na brojtelefona@adresa', 'Email created': 'Email kreiran', 'Email deleted': 'Email obrisan', 'Email Details': 'Detalji elektronske pošte', 'Email InBox': 'Ulaz e-pošte', 'Email Settings': 'Postavke e-pošte', 'Email Settings updated': 'Postavke e-pošte ažurirane', 'Email settings updated': 'Postavke email-a ažurirane', 'Embalming': 'Balzamovanje', 'Embassy': 'Ambasada', 'embedded': 'ugrađeno', 'Emergency Capacity Building project': 'Projekat hitne izgradnje kapaciteta', 'Emergency Contacts': 'Hitni kontakti', 'Emergency Department': 'Odjel za hitne slučajeve', 'Emergency Medical Services': 'Hitne medicinske službe', 'Emergency Shelter': 'Hitno sklonište', 'Emergency Support Facility': 'Objekat za podršku u hitnim slučajevima', 'Emergency Support Service': 'Služba za hitnu podršku', 'Emergency Telecommunications': 'Telekomunikacije u hitnim slučajevima', 'EMS Reason': 'Razlog za slanje hitne pomoći', 'EMS Status': 'Status hitne medicinske službe', 'EMS Status Reasons': 'Razlozi EMS statusa', 'EMS Traffic Status': 'Status EMS saobraćaja', 'Enable': 'Omogući', 'Enable in Default Config?': 'Uključiti u podrazumijevanoj konfiguraciji?', 'Enable/Disable Layers': 'Omogućite/Onemogućite slojeve', 'Enabled': 'Omogućen', 'Enabled?': 'Omogućeno?', 'Enabling MapMaker layers disables the StreetView functionality': 'Omogućavanje slojeva za izrađivanje karata onemogućuje funkcionalnosti StreetView-a', 'enclosed area': 'ograđeni prostor', 'End date': 'Krajnji datum', 'End Date': 'Završni datum', 'End date should be after start date': 'Krajnji datum mora biti nakon početnog', 'End of Period': 'Kraj Perioda', 'English': 'engleski', 'Enter a date before': 'Unesi datum prije', 'Enter a GPS Coord': 'Unesi GPS koordinate', 'Enter a location': 'Unesi lokaciju', 'Enter a name for the spreadsheet you are uploading (mandatory).': 'Unesite ime za proračunsku tablicu (spreadsheet) koju uploadate (obavezno).', 'Enter a name for the spreadsheet you are uploading.': 'Unesite ime za tabelarni prikaz koji učitavate', 'Enter a new support request.': 'Unesi novi zahtjev za podršku', 'Enter a number between %(min)g and %(max)g': 'Enter a number between %(min)g and %(max)g', 'enter a number between %(min)g and %(max)g': 'unesite broj između %(min)g i %(max)g', 'Enter a summary of the request here.': 'Unesi rezime zahtjeva ovdje', 'Enter a unique label!': 'Unesite jedinstvenu oznaku!', 'Enter a valid date before': 'Unesi validan datum prije', 'Enter a valid email': 'Unesite validan email', 'Enter a valid future date': 'Unesite validan datum u budućnosti', 'Enter a valid past date': 'Unesite validan datum u prošlosti', 'Enter a valid phone number': 'Unesite važeći broj telefona', 'enter a value': 'unesite vrijednost', 'Enter a value carefully without spelling mistakes, this field needs to match existing data.': 'Unesite vrijednost pažljivo bez grešaka u kucanju, jer se ovo polje mora usaglasiti s postojećim podacima.', 'Enter an integer between %(min)g and %(max)g': 'Enter an integer between %(min)g and %(max)g', 'enter an integer between %(min)g and %(max)g': 'Unesi cijeli broj između %(min)g i %(max)g', 'Enter an integer greater than or equal to %(min)g': 'Enter an integer greater than or equal to %(min)g', 'Enter Completed Assessment': 'Unesi završenu ocjenu', 'Enter Completed Assessment Form': 'Unesi završen formular ocjene', 'Enter Coordinates in Deg Min Sec': 'Unesi koordinate u stepenima, minutama i sekundama', 'Enter Coordinates:': 'Unesi koordinate:', 'enter date and time': 'unesite datum i 
vrijeme', 'enter date and time in range %(min)s %(max)s': 'unesite datum i vrijeme u opsegu %(min)s %(max)s', 'enter date and time on or after %(min)s': 'unesi datum i vrijeme za %(min)s', 'enter date and time on or before %(max)s': 'unesi datum i vrijeme prije %(max)s', 'Enter phone number in international format like +46783754957': 'Unesite telefonski broj u internacionalnom formatu poput +46783754957', 'Enter some characters to bring up a list of possible matches': 'Unesite neke znakove kako biste pozvali listu mogucih poklapanja', 'Enter some characters to bring up a list of possible matches.': 'Upišite nekoliko početnih karaktera da biste vidjeli listu mogućih podudarnosti.', 'Enter tags separated by commas.': 'Unesite oznake odvojene zarezima.', 'Enter the data for an assessment': 'Unijeti podatke za procjenu', 'Enter the same password as above': 'Unesi istu lozinku kao iznad', 'Enter your first name': 'Unesite vaše ime ', 'Enter your firstname': 'Unesite svoje ime', 'Enter your organisation': 'Unesi svoju organizaciju', 'Enter your organization': 'Unesite vašu organizaciju', 'Entered': 'Uneseno', 'Entering a phone number is optional, but doing so allows you to subscribe to receive SMS messages.': 'Unos telefonskog broja je proizvoljan, ali ukoliko se odlučite da unesete možete se registrovati da primate SMS poruke.', 'Entity': 'jedinka', 'Entity Information': 'Informacije o jedinki', 'Entity Type': 'Tip entiteta', 'Entry added to Asset Log': 'Stavka dodana u zapisnik sredstava', 'Entry deleted': 'Unos izbrisan', 'Environment': 'Okruženje', 'Equatorial Guinea': 'Ekvatorijalna Gvineja', 'Equipment': 'Oprema', 'ER Status': 'Status hitne pomoći', 'ER Status Reason': 'Razlog ER statusa', 'Eritrea': 'Eritreja', 'Error encountered while applying the theme.': 'Desila se greška pri primjenjivanju teme.', 'Error in message': 'Greška u poruci', "Error logs for '%(app)s'": 'Zapisnici grešaka za "%(app)s"', 'Error reading file (invalid format?): %(msg)s': 'Greška čitanja datoteke (pogrešan format?): %(msg)s', 'Error sending message': 'Greška pri slanju poruke', 'Error sending message!': 'Greška pri slanju poruke!', 'Error Tickets': 'Kartice Grešaka', 'Errors': 'Greške', 'ESRI Shape File': 'ESRI datoteka likova', 'Essential Staff?': 'Suštinski bitno osoblje?', 'Est. Delivery Date': 'Procijenjeni datum isporuke', 'Estimated # of households who are affected by the emergency': 'Procijenjen broj domaćinstava koja su pogođena od nesreće', 'Estimated # of people who are affected by the emergency': 'Procjenjen broj ljudi koji su pogođeni krizom', 'Estimated Delivery Date': 'Procijenjeni Datum Isporuke', 'Estimated Overall Building Damage': 'Ukupna estimirana građevinska šteta', 'Estimated Reopening Date': 'Procijenjeni datum ponovnog otvaranja', 'Estimated total number of people in institutions': 'Procjenjen ukupan broj ljudi u institucijama', 'Estimated Value': 'Procjenjena vrijednost', 'Estimated Value per Pack': 'Procijenjena vrijednost po paketu', 'Estonia': 'Estonija', 'Ethiopia': 'Etiopija', 'Ethnicity': 'Nacionalnost', 'Euros': 'Eura', 'Evacuating': 'Evakuacija', 'Evacuation drills': 'Vježbe evakuacije', 'Evacuation is short-term whilst storm passing e.g. 12 hours, hence people need less space.': 'Evakuacija je kratkoročna do prolaska oluje, npr. 12 sati, stoga ljudima treba manje prostora.', 'Evacuation Route': 'Put evakuacije', 'Evaluate the information in this message. 
(This value SHOULD NOT be used in public warning applications.)': 'Procjeni informaciju u ovoj poruci.(Ova vrijednost NE BI TREBALA BITI korištena u javnim aplikacijama za upozorenje', 'Event': 'Dogadaj', 'Event added': 'Događaj dodan', 'Event deleted': 'Događaj obrisan', 'Event Details': 'Detalji događaja', 'Event Resource': 'Resurs događaja', 'Event Time': 'Vrijeme događaja', 'Event Type': 'Tip događaja', 'Event type': 'Tip događaja', 'Event Type added': 'Tip događaja dodan', 'Event Type Details': 'Detalji o vrsti događaja', 'Event Type removed': 'Tip događaja obrisan', 'Event Type updated': 'Tip događaja ažuriran', 'Event Types': 'Tipovi događaja', 'Event updated': 'Događaj ažuriran', 'Events': 'Događaji', 'Example': 'Primjer', 'Exceeded': 'Prekoračeno', 'Excellent': 'Odlično', 'Exclude contents': 'Isključi sadržaj', 'Excreta disposal': 'Sanitarni čvor', 'Execute a pre-planned activity identified in <instruction>': 'Izvrši unaprijed planiranu aktvinost identificiranu u <instrukciji>', 'Exercise': 'Vježba', 'EXERCISE': 'Vježba', 'Exercise?': 'Vježba?', 'Exercises mean all screens have a watermark & all notifications have a prefix.': 'Vježbe znače da svi ekrani imaju vodeni žig i sve obavijesti imaju isti prefiks.', 'Existing food stocks': 'Postojeće zalihe hrane', 'Existing food stocks, main dishes': 'Postojeće zalihe hrane, glavni artikli', 'Existing food stocks, side dishes': 'Postojeće zalihe hrane, pomoćni artikli', 'Existing location cannot be converted into a group.': 'Postojeća lokacija ne može biti pretvorena u grupu', 'Existing Placard Type': 'Postojeći tip plakata', 'Existing Sections': 'Postojeća odjeljenja', 'Exits': 'Izlazi', 'Expected In': 'Očekivano u', 'Expected Out': 'Očekivano', 'Expected Return Home': 'Očekivani povratak u dom', 'Experience': 'Iskustvo', 'Expiration Date': 'Datum isteka', 'Expiration Details': 'Detalji isteka', 'Expiration Report': 'Izvještaj o isteku', 'Expired': 'Istekao', 'Expired?': 'Istekao?', 'Expiring Staff Contracts Report': 'Izvještaj o osoblju kome ističe ugovor', 'Expiry (months)': 'Ističe (mjeseci)', 'Expiry date': 'Datum isteka', 'Expiry Date': 'Rok valjanosti', 'Expiry Date/Time': 'Vrijeme i datum isteka', 'Expiry Time': 'Vrijeme isteka', 'Explosive Hazard': 'Opasnost od eksplozije', 'Export': 'Izvezi', 'Export all Completed Assessment Data': 'Izvezi sve podatke o završenoj procjeni', 'Export as': 'Izvezi kao', 'export as csv file': 'izvezi kao CSV dokument', 'Export Data': 'Izvezi podatke', 'Export Database as CSV': 'Izvezi bazu podataka kao CSV', 'Export in %(format)s format': 'Izvoz u %(format)s formatu', 'Export in GPX format': 'Izvoz u GPX formatu', 'Export in KML format': 'Poslati u KML formatu', 'Export in OSM format': 'Izvoz u OSM formatu', 'Export in PDF format': 'Izvesti u PDF formatu', 'Export in RSS format': 'Eksportujte u RSS formatu', 'Export in XLS format': 'Izvesti u XLS formatu', 'Exterior and Interior': 'Vanjski i unutrašnji', 'Exterior Only': 'Samo vanjski dio', 'External Features': 'Spoljnje mogućnosti', 'Eye Color': 'Boja očiju', 'Eyebrows, Peculiarities': 'Obrve, specifičnosti', 'Eyebrows, Shape': 'Obrve, oblik', 'Eyebrows, Thickness': 'Obrve, debljina ', 'Eyes, Colour': 'Oči, boja', 'Eyes, Distance between Eyes': 'Oči, razmak između očiju', 'Eyes, Peculiarities': 'Oči, specifičnosti', 'Eyes, Shade': 'Oči, sjena', 'Face': 'Lice', 'Facebook': 'Facebook', 'Facial hair, color': 'Dlake po licu, boja', 'Facial hair, Colour': 'Dlake po licu, boja', 'Facial hair, comment': 'Dlake po licu, komentar', 'Facial hair, length': 
'Dlake po licu, dužina', 'Facial hair, type': 'Dlake na licu, tip', 'Facial hair, Type': 'Dlake na licu, tip', 'Facial hear, length': 'Brada, dužina', "Facilitate uploading of missing person's photograph": 'Omogućite slanje fotografije nedostajuće osobe', 'Facilities': 'Objekti', 'Facility': 'Objekat', 'Facility added': 'Dodat objekat', 'Facility Contact': 'Kontakt vezan za objekat', 'Facility deleted': 'Obrisan objekat', 'Facility Details': 'Detalji objekta', 'Facility Operations': 'Aktivnosti objekta', 'Facility or Location': 'Objekat / Lokacija', 'Facility removed': 'Objekat uklonjen', 'Facility Status': 'Stanje objekta', 'Facility Type': 'Vrsta objekta', 'Facility Type added': 'Vrsta objekta dodana', 'Facility Type deleted': 'Vrsta objekta obrisana', 'Facility Type Details': 'Detalji o vrsti objekta', 'Facility Type updated': 'Vrsta objekta ažurirana', 'Facility Types': 'Vrste objekata', 'Facility updated': 'Ažuriran objekat', 'Factors affecting school attendance': 'Faktori koji utiču na pohađanje škole', 'Fail': 'Neuspjeh', 'Failed': 'Nije uspjelo', 'Failed!': 'Nije uspjelo!', 'Fair': 'Pošteno', 'Falling Object Hazard': 'Opasnost od padajućih objekata', 'Families/HH': 'Porodice/HH', 'Family': 'Porodica', 'Family Care': 'Porodična briga', 'Family tarpaulins received': 'Cerade za porodicu primljene', 'Family tarpaulins, source': 'Porodične cerade, izvor', 'Family/friends': 'Porodica/prijatelji', 'Farmland/fishing material assistance, Rank': 'Materijalna pomoć za obradu zemlje/ribolov , rang', 'fat': 'mast', 'Fatalities': 'Ljudske žrtve', 'FAX': 'FAKS', 'Fax': 'Faks', 'Feature Class': 'Klasa karakteristika', 'Feature Class added': 'Klasa karakteristika dodana', 'Feature Class deleted': 'Obrisana klasa karakteristika', 'Feature Class Details': 'Detalji klase karakteristika', 'Feature Class updated': 'Klasa karakteristika ažurirana', 'Feature Classes': 'Klase karakteristika', 'Feature Classes are collections of Locations (Features) of the same type': 'Klase karakteristika su kolekcije lokacija (karakteristika) istog tipa', 'Feature Group': 'Grupa karakteristika', 'Feature Group added': 'Dodan grupa karakteristika', 'Feature Group deleted': 'Grupna karakteristika izbrisana', 'Feature Group Details': 'Detalji grupe karakteristika', 'Feature Group Updated': 'Grupa karakteristika ažurirana', 'Feature Group updated': 'Grupa karakteristika ažurirana', 'Feature Groups': 'Grupe karakteristika', 'Feature Info': 'Informacije o karakteristici', 'Feature Layer': 'Sloj karakteristika', 'Feature Layer added': 'Dodat sloj karakteristika', 'Feature Layer deleted': 'Obrisan sloj karakteristika', 'Feature Layer Details': 'Detalji sloja karakteristika', 'Feature Layer updated': 'Ažuriran sloj karakteristika', 'Feature Layers': 'Slojevi karakteristika', 'Feature Namespace': 'Imenik karakteristika', 'Feature Request': 'Zahtjev za karakteristikama', 'Feature Type': 'Tip karakteristike', 'Features Include': 'Karakteristike uključuju', 'feedback': 'povratna informacija', 'Feedback': 'Povratna informacija', 'Feet, Condition': 'Stopalo, stanje', 'Feet, Nails': 'Stopala, nokti', 'Feet, Shape': 'Stopalo, oblik', 'Female': 'Žensko', 'female': 'žensko', 'Female headed households': 'Domaćinstva u kojim je žena glava porodice', 'Few': 'Mali broj', 'Field': 'Terenski', 'Field Hospital': 'Poljska bolnica', 'Fields tagged with a star': 'Polja označena zvjezdicom', 'Fiji': 'Fidži', 'File': 'Datoteka', 'File Imported': 'Datoteka je unesena', 'File Importer': 'Uvoz datoteka', 'File name': 'Ime datoteke', 'File not found': 
'Datoteka nije pronađena', 'File uploaded': 'Datoteka poslana', 'Files': 'Datoteke', 'Fill in Latitude': 'Dopuni geografsku širinu', 'Fill in Longitude': 'Upišite geografsku dužinu', 'fill in order: day(2) month(2) year(4)': 'popuni redoslijedom: dan(2) mjesec(2) godina(4)', 'fill in order: hour(2) min(2) day(2) month(2) year(4)': 'popuni redoslijedom: sat(2) min(2) dan(2) mjesec(2) godina(4)', 'fill in order: hour(2) min(2) month(2) day(2) year(4)': 'popuni redoslijedom: sat(2) min(2) mjesec(2) dan(2) godina(4)', 'fill in order: month(2) day(2) year(4)': 'popuni redoslijedom: mjesec(2) dan(2) godina(4)', 'Filter': 'Filtriraj', 'Filter by %(type)s': 'Filtriraj po %(type)s', 'Filter by Bookmark': 'Filtriraj po zabilješci', 'Filter by Category': 'Filtriraj po kategoriji', 'Filter by Country': 'Filtriraj po državi', 'Filter by Date': 'Filtriraj po datumu', 'Filter by Disaster': 'Filtriraj po katastrofi', 'Filter by Location': 'Filtriraj po lokaciji', 'Filter by Organization': 'Filtriraj po organizaciji', 'Filter by Status': 'Filtriraj po statusu', 'Filter by Tag': 'Filtriraj po oznaci', 'Filter by Type': 'Filtriraj po tipu', 'Filter Field': 'Polje filtera', 'Filter Options': 'Opcije filtera', 'Filter Tweets by the date they were tweeted on': 'Filtriraj Tweet po danu kada su navedeni', 'Filter Tweets by who tweeted them': 'Filtriraj Tweet po osobama koje su unijele', 'Filter type': 'Tip Filtera', 'Filter Value': 'Filter vrijednosti', 'Filtered search of aid pledges and requests': 'Filtrirana pretraga ponude i potražnje pomoći', 'Filters': 'Filteri', 'final report': 'završni izvještaj', 'Find': 'Pronađi', 'Find a Person Record': 'Nađite zapis o osobi', 'Find by Name': 'Nađi po imenu', 'Find Dead Body Report': 'Pronađi izvještaj o mrtvim osobama', 'Find Details': 'Nađi detalje', 'Find Hospital': 'Pronađi bolnicu', 'Find more': 'Nađi više', 'Find on Map': 'Nađi na karti', 'Find Person Record': 'Pronađi zapis osobe', 'Find Recovery Report': 'Nađi Izvještaj o pronalaženju', 'Find Report added': 'Dodat izvjestaj o traženju', 'Find Report deleted': 'Izvještaj o traženju izbrisan', 'Find Report updated': 'Traženi izvještaj ažuriran', 'Find Volunteers': 'Pronađi volontere', 'Finder': 'Pronalazač', 'Fingerprint': 'Otisak prsta', 'Fingerprinting': 'Uzimanje otiska prsta', 'Fingerprints': 'Otisci', 'Finish': 'Završetak', 'Finished Jobs': 'Gotovi zadaci', 'Finland': 'Finska', 'Fire': 'Vatra', 'Fire Fighter Forest Vehicle': 'Vatrogasno šumsko vozilo', 'Fire Fighter Light Vehicle': 'Vatrogasno lako vozilo', 'Fire Fighter Rural Vehicle': 'Vatrogasno seosko vozilo', 'Fire Fighter Special Vehicle': 'Vatrogasno specijalno vozilo', 'Fire Fighter Urban Vehicle': 'Vatrogasno gradsko vozilo', 'Fire Station': 'Vatrogasna stanica', 'Fire suppression and rescue': 'Suzbijanje vatre i spašavanje', 'First': 'Prvi', 'First name': 'Ime', 'First Name': 'Ime', 'Fishing': 'Ribolov', 'Flash Flood': 'Nagla poplava', 'Flash Freeze': 'Brzo zamrzavanje', 'flatfooted': 'dustabanlija', 'Flexible Impact Assessments': 'Fleksibilna procjena uticaja', 'Flood': 'Poplava', 'Flood Alerts': 'Uzbune od poplava', 'Flood Alerts show water levels in various parts of the country': 'Alarmi poplava pokazuju vodostaje u različitim dijelovima države', 'Flood Depth': 'Dubina poplave', 'Flood Report': 'Izvještaj o poplavi', 'Flood Report added': 'Izvještaj o poplavi dodan', 'Flood Report deleted': 'Izvještaj o poplavi izbrisan', 'Flood Report Details': 'Detalji izvještaja o poplavi', 'Flood Report updated': 'Izvještaj o Poplavi ažuriran', 'Flood 
Reports': 'Izvještaji o poplavama', 'Flooding': 'Poplava', 'Flow Status': 'Status toka', 'flush latrine with septic tank': 'Očisti zahod i septičku jamu', 'Focal Person': 'Poznata osoba', 'Focal Point': 'Tačka fokusa', 'Fog': 'Magla', 'Folder': 'Mapa', 'Food': 'Hrana', 'Food assistance': 'Pomoć u hrani', 'Food assistance available/expected': 'Pomoć u hrani primljena/očekivana', 'Food security ': 'Sigurnost hrane', 'Food Supply': 'Zalihe hrane', 'food_sources': 'izvori hrane', 'Footer': 'Zaglavlje na dnu strane', 'Footer file %s missing!': 'Nedostaje datoteka zaglavlja %s!', 'For': 'Za', 'For a country this would be the ISO2 code, for a Town, it would be the Airport Locode.': 'Za zemlju to će biti ISO2 kod, za grad, to bi bio Locode aerodroma', 'For each sync partner, there is a default sync job that runs after a specified interval of time. You can also set up more sync jobs which could be customized on your needs. Click the link on the right to get started.': 'Za svakog sinhronizovanog partnera , postoji zadani sinhronizovani posao nakon određenog vremenskog intervala . Takođe možete postaviti više sinhronizovanih poslova koji mogu biti prilagođeni prema vašim potrebama . Kliknite link nadesno da počnete.', 'For Eden instances enter the application base URL, e.g. http://sync.sahanfoundation.org/eden, for other peers the URL of the synchronization interface.': 'Za instalacije Eden platforme unesite URL bazne organizacije, npr. http://sync.sahanfoundation.org/eden, za druge učesnike URL sinhronizacijskog interfejsa.', 'For enhanced security, you are recommended to enter a username and password, and notify administrators of other machines in your organization to add this username and password against your UUID in Synchronization -> Sync Partners': 'Za povećanu sigurnost, preporučljivo je upisati korisničko ime i šifru, te obavijestiti administratora ostalih mašina u Vašoj organizaciji da doda to korisničko ime i šifru preko Vašeg UUID u Sinhronizacija -> Sinhronizacijski partneri', 'For Entity': 'Za jedinku', 'For live help from the Sahana community on using this application, go to': 'Ako trebate pomoć pri korištenju ove aplikacije od strane Sahana zajednice, idite na', 'For messages that support alert network internal functions': 'Za poruke koje podržavaju interne funkcije mreža za uzbunjivanje', 'For more details on the Sahana Eden system, see the': 'Za vise detalja o Sahana Eden sistemu, pogledati', 'For more details on the Sahana system, see the': 'Za vise detalja o Sahana sistemu, pogledati', 'For more information, see': 'Za više informacija, pogledaj', 'For more information, see ': 'Za više informacija, pogledajte ', 'For POP-3 this is usually 110 (995 for SSL), for IMAP this is usually 143 (993 for IMAP).': 'Za POP-3 ovo je obično 110 (995 za SSL), za IMAP ovo je obično 143 (993 za IMAP).', 'For:': 'Za:', 'forehead': 'čelo', 'Forehead, Height': 'Čelo, visina', 'Forehead, Inclination': 'Čelo, nagib', 'Forehead, Width': 'Čelo, širina', 'Forest Fire': 'Šumski požar', 'Forest Tank Tactical Vehicle': 'Šumska taktička pokretna cisterna', 'form data': 'podaci formulara', 'Form Settings': 'Postavke obrasca', 'Formal camp': 'Formalni kamp', "Format the list of attribute values & the RGB value to use for these as a JSON object, e.g.: {Red: '#FF0000', Green: '#00FF00', Yellow: '#FFFF00'}": "Oblikujte popis atributa i RGB vrijednosti da bi se koristile kao JSON objekt, npr.: {Crvena: '#FF0000', Zelena: '#00FF00 ', Žuta: '#FFFF00 '}", 'Forms': 'Formulari', 'Found': 'Pronađeno', 'found': 'nađeno', 
'Foundations': 'Osnove', 'Freezing Drizzle': 'ledeno rominjanje', 'Freezing Rain': 'Ledena kiša', 'Freezing Spray': 'ledena kiša', 'Freight company or organisation providing transport': 'Transportno preduzeće ili organizacija koja pruža transport', 'French': 'Francuski', 'Frequency': 'Učestanost', 'Friday': 'Petak', 'From': 'Od', 'From %(site)s': 'Sa %(site)s', 'From Facility': 'Iz objekta', 'From Inventory': 'Iz inventara', 'From Location': 'Sa lokacije', 'From Organization': 'Od organizacije', 'From Person': 'Od osobe', 'from Twitter': 'sa Twittera', 'Frost': 'Mraz', 'Fuel': 'Gorivo', 'Fulfil. Status': 'Ispuni status', 'Fulfill Status': 'Ispuni status', 'Fulfillment Status': 'Status realizacije', 'full': 'puno', 'Full': 'Potpun', 'Full beard': 'Puna brada', 'Fullscreen Map': 'Mapa punog ekrana', 'Function': 'Funkcija', 'Function name': 'Ime funkcije', 'Function Permissions': 'Funkcijske dozvole', 'Function tour is activated': 'Tura funkcije je aktivirana', 'Functions available': 'Dostupne funkcije', 'Funding': 'Fondovi', 'Funding Organisation': 'Osnivačka organizacija', 'Funding Organization': 'Osnivačka organizacija', 'Funding Report': 'Izvještaj o fondovima', 'Funds Contributed': 'Doprinos fondovima', 'Funeral': 'Sahrana', 'Further Action Recommended': 'Preporučljive su daljnje akcije', 'Gale Wind': 'Jak vjetar', 'Gap Analysis': 'Analiza propusta', 'Gap Analysis Map': 'Karta analize propusta', 'Gap Analysis Report': 'Izvještaj o analizi pukotina', 'Gap Map': 'Karta sa pukotinama', 'Gap Report': 'Izvještaj propusta', 'Gas Supply Left (in hours)': 'Preostala zaliha goriva (u satima)', 'Gas Supply Type': 'Vrsta zaliha goriva', 'Gateway': 'Mrežni izlaz', 'Gateway Settings': 'Postavke mrežnog izlaza', 'Gateway settings updated': 'Postavke mrežnog izlaza ažurirane', 'Gender': 'Spol', 'General': 'Općenito', 'General Comment': 'Generalni komentar', 'General emergency and public safety': 'Opće opasnosti i javna sigurnost', 'General information on demographics': 'Opšte demografske informacije', 'General Medical/Surgical': 'Opće zdravstveno / hirurško', 'General Person Transportation Vehicle': 'Transportno vozilo opšte namjene', 'General Skills': 'Opšte vještine', 'Generate portable application': 'Generiši prenosivu aplikaciju', 'Geocode': 'Geokod', 'Geocoder Selection': 'Izbor geokodera', 'GeoJSON Layer': 'GeoJSON sloj', 'Geometry Name': 'Geometrijski naziv', 'Geonames.org search requires Internet connectivity!': 'Geonames.org pretraga zahtijeva Internet vezu!', 'Geophysical (inc. landslide)': 'Geofizički (ink. 
klizište)', 'Georgia': 'Gruzija', 'GeoRSS Layer': 'GeoRSS sloj', 'Geotechnical': 'Geotehnički', 'Geotechnical Hazards': 'Geotehničke opasnosti', 'Geraldo module not available within the running Python - this needs installing for PDF output!': 'Modul Geraldo nije dostupan unutar pokrenutog Python-a, ovo zahtijeva instalaciju PDF izlaza!', 'Geraldo module not available within the running Python - this needs installing to do PDF Reporting!': 'Modul Geraldo nije dostupan unutar pokrenutog Python-a, ovo zahtijeva instalaciju PDF izlaza!', 'German': 'njemački', 'Germany': 'Njemačka', 'Get Feature Info': 'Dobavi informacije o karakteristici', 'Get incoming recovery requests as RSS feed': 'Dobijte dolazeće zahtjeve za oporavak kao RSS feed', 'getting': 'uzimajući', 'Ghana': 'Gana', 'Girls 13-18 yrs in affected area': 'Djevojčice 13-18 god u pogođenom području', 'Girls 13-18 yrs not attending school': 'Djevojčice 13-18 godina koji ne pohađaju školu', 'Girls 6-12 yrs in affected area': 'Djevojčice 6-12 godina u zahvaćenim područjima', 'Girls 6-12 yrs not attending school': 'Djeviojčice 6-12 godina koje ne pohađaju školu', 'GIS integration to view location details of the Shelter': 'GIS integracija za pregled detalja lokacije skloništa', 'GIS Reports of Shelter': 'GIS Izvještaji skloništa', 'Give a brief description of the image, e.g. what can be seen where on the picture (optional).': 'Dajte kratak opis fotografije, npr. šta se gdje može vidjeti na slici (nije obavezno).', 'Give information about where and when you have seen the person': 'Dajte informaciju o tome gdje i kada ste vidjeli ovu osobu', 'Give information about where and when you have seen them': 'Dajte informaciju o tome gdje i kada ste ih vidjeli', 'Global Messaging Settings': 'Globalna Podešavanje Poruka', 'Go': 'Idi', "Go to %(url)s, sign up & then register your application. You can put any URL in & you only need to select the 'modify the map' permission.": "Idite na %(url)s, prijavite se i registrujte vašu aplikaciju. Možete unijeti neki URL i tada samo trebati izmijeniti dozvole za 'mijenjanje mape'.", 'Go to Request': 'Idi na zahtjev', 'Goatee': 'Kozja bradica', 'Good': 'Dobro', 'Good Condition': 'Dobro stanje', 'Goods Received Note': 'Napomena o prijemu robe', 'Google Layer': 'Google sloj', "Google Layers cannot be displayed if there isn't a valid API Key": 'Google Layers ne mogu biti prikazani ukoliko ne postoji validan API ključ', 'Government': 'Vlada', 'Government building': 'Zgrada Vlade', 'Government UID': 'JMB', 'Government UUID': 'JMB', 'GPS Data': 'GPS podaci', 'GPS data': 'GPS podaci', 'GPS data added': 'GPS podaci dodani', 'GPS data deleted': 'GPS podaci obrisani', 'GPS data updated': 'GPS podaci ažurirani', 'GPS Track': 'GPS praćenje', 'GPS Track File': 'Datoteka GPS praćenja', 'GPX Layer': 'GPX SLOJ', 'GPX Track': 'GPX staza', 'Grade': 'Ocjena', 'Graph': 'Grafikon', 'Graph Model': 'Model grafa', 'Great British Pounds': 'Britanske funte', 'Greater than 10 matches. Please refine search further': 'Više od 10 poklapanja. 
Molim napravite precizniju pretragu', 'Greece': 'Grčka', 'Greek': 'Grčki', 'green': 'zelena', 'Green': 'Zeleno', 'grey': 'siva', 'Grid': 'Mreža', 'Ground movement, fissures': 'Kretanje tla, pukotine', 'Ground movement, settlement, slips': 'Pokreti zemljišta, slijeganje, klizišta', 'Group': 'Grupa', 'Group added': 'Grupa dodana', 'Group deleted': 'Grupa obrisana', 'Group description': 'Opis grupe', 'Group Description': 'Opis grupe', 'Group Details': 'Detalji o grupi', 'Group Head': 'Glavna osoba grupe', 'Group ID': 'IB grupe', 'Group Leader': 'Vođa grupe', 'Group Member added': 'Dodan član grupe', 'Group Members': 'Članovi grupe', 'Group Membership added': 'Dodano članstvo grupe', 'Group Membership deleted': 'Grupno članstvo izbrisano', 'Group Membership Details': 'Detalji grupnog članstva', 'Group Membership updated': 'Grupno članstvo ažurirano', 'Group Memberships': 'Grupna članstva', 'Group name': 'Ime grupe', 'Group Name': 'Ime grupe', 'Group Title': 'Naslov Grupe', 'Group Type': 'Vrsta grupe', 'Group updated': 'Grupa ažurirana', 'Group Updated': 'Ažurirana grupa', 'Grouped by': 'Grupisano po', "Grouping by 'Family Unit' or other group category": 'Grupisanje po porodičnoj jedinici ili drugoj kategoriji grupe', 'Groups': 'Grupe', 'Groups removed': 'Grupa odstranjena', 'Guatemala': 'Gvatemala', 'Guest': 'Gost', 'Guided Tour Functionality': 'Funkcionalnost vođene ture', 'Guided Tours': 'Vođene ture', 'Guinea': 'Gvineja', 'Guinea-Bissau': 'Gvineja-Bisau', 'Guyana': 'Gvajana', 'Hail': 'Grad', 'Hair Color': 'Boja kose', 'Hair Comments': 'Komentari na kosu', 'Hair Length': 'Dužina kose', 'Hair of the head, Baldness (extent)': 'Kosa na glavi, ćelavost (veličina)', 'Hair of the head, Baldness (location)': 'Kosa na glavi, ćelavost (mjesto)', 'Hair of the head, Colour': 'Kosa, boja', 'Hair of the head, Length': 'Kosa, dužina', 'Hair of the head, Parting': 'Kosa, razdjeljak', 'Hair of the head, Shade of colour': 'Kosa na glavi, nijansa boje', 'Hair of the head, Style': 'Kosa, stil', 'Hair of the head, Thickness': 'Kosa, debljina', 'Hair of the head, Type': 'Kosa, tip', 'Hair Style': 'Frizura', 'Hair-piece': 'Dlaka', 'Hands, Nail length': 'Ruke, dužina noktiju', 'Hands, Nail peculiarities': 'Ruke, specifičnosti noktiju', 'Hands, Nicotine': 'Ruke, nikotin', 'Hands, Shape': 'Ruke, oblik', 'Hands, Size': 'Ruke, veličina', 'Has data from this Reference Document been entered into Sahana?': 'Da li su podaci iz ovog referentnog dokumenta uneseni u Sahanu?', 'Has only read-only access to records relating to this Organization or Site.': 'Ima samo pristup za čitanje zapisa koji se odnose na ovu organizaciju ili mjesto.', 'Has the %(GRN)s (%(GRN_name)s) form been completed?': 'Da li je formular %(GRN)s (%(GRN_name)s) ispunjen?', 'Has the Certificate for receipt of the shipment been given to the sender?': 'Da li je pošiljalac primio certifikat o prijemu isporuke?', 'Has the GRN (Goods Received Note) been completed?': 'Da li su BPR (Bilješke o Primljenoj Robi) popunjene?', 'Has your business been damaged in the course of the disaster?': 'Da li je vaš posao oštećen usljed katastrofe?', 'Have normal food sources been disrupted?': 'Da li su normalni izvori hrane oštećeni?', 'Hazard': 'Rizik', 'Hazard added': 'Rizik dodan', 'Hazard added to Project': 'Rizik dodan u projekat', 'Hazard deleted': 'Rizik obrisan', 'Hazard Details': 'Detalji rizika', 'Hazard Pay': 'Rizično plaćanje', 'Hazard removed from Project': 'Rizik uklonjen sa projekta', 'Hazard updated': 'Rizik ažuriran', 'Hazardous Material': 'Opasan materijal', 'Hazardous
Road Conditions': 'Opasni uslovi na putu', 'Hazards': 'Rizici', 'Head': 'Glava', 'Head form, front': 'Oblik glave, prednji', 'Head form, profile': 'Oblik glave, profil', 'Header Background': 'Pozadina zaglavlja', 'Header background file %s missing!': 'Pozadinska datotka zaglavlja %s nedostaje!', 'Headquarters': 'Glavno sjedište', 'Health': 'Zdravlje', 'Health care assistance, Rank': 'Pomoć zdravstvene zaštite, stepen', 'Health center': 'Zdravstveni centar', 'Health center with beds': 'Zdravstveni centar sa krevetima', 'Health center without beds': 'Zdravstveni centar bez kreveta', 'Health Org UUID': 'Identifikacijski broj zrdavstvene organizacije', 'Health services functioning prior to disaster': 'Zdravstvene usluge koje su funkcionisale prije katastrofe', 'Health services functioning since disaster': 'Zdravstvene usluge koje djeluju nakon katastrofe-', 'Health services status': 'Status zdravstvenih usluga', 'Healthcare Worker': 'Zdravstveni radnik', 'Heat and Humidity': 'Toplota i Vlažnost', 'Heat Wave': 'Toplotni talas', 'heavy': 'težak', 'Height': 'Visina', 'Height (cm)': 'Visina (cm)', 'Height (m)': 'visina (m)', 'Helipad Information': 'Informacije o helikopterskom sletištu', 'Heliport': 'Heliodrom', 'Heliport added': 'Heliodrom dodan', 'Heliport deleted': 'Heliodrom obrisan', 'Heliport Details': 'Detalji heliodroma', 'Heliport updated': 'Heliodrom ažuriran', 'Heliports': 'Heliodromi', 'Help': 'Pomoć', 'Helps to monitor status of hospitals': 'Pomaže pri praćenju statusa bolnica', 'Helps to report and search for Missing Persons': 'Pomaže pri izvještavanju i traženju nestalih osoba', 'Helps to report and search for missing persons': 'Pomaže pri prijavljivanju i traženju nestalih osoba', 'here': 'ovdje', 'Here are the solution items related to the problem.': 'Ovdje su predmeti rješenja povezani sa problemom.', 'Heritage Listed': 'Izlistano nasljeđe', 'HFA Priorities': 'HFA Prioriteti', 'HFA1: Ensure that disaster risk reduction is a national and a local priority with a strong institutional basis for implementation.': 'HFA1: Osigurajte da je smanjenje rizika od katastrofe državni i lokalni prioritet i jaka institucionalna baza za implementaciju.', 'HFA2: Identify, assess and monitor disaster risks and enhance early warning.': 'HFA2: Prepoznati, procijeniti i pratiti rizike od katastrofe i pojačati rano upozoravanje', 'HFA3: Use knowledge, innovation and education to build a culture of safety and resilience at all levels.': 'HFA3: Koristiti znanjem inovacije i obrazovanje da se sagradi bezbjednosna kultura na svim nivoima.', 'HFA4: Reduce the underlying risk factors.': 'HFA4: Smanjiti podložne faktore rizika.', 'HFA5: Strengthen disaster preparedness for effective response at all levels.': 'HFA5: Pojačati spremnost za katastrofe za efikasan odgovor na svim nivoima.', 'Hide': 'Sakrij', 'Hide Table': 'Sakrij tabelu', 'Hierarchy': 'Hijerarhija', 'Hierarchy Level 0 Name (i.e. Country)': 'Ime nultog hijerarhijskog nivoa (države)', 'Hierarchy Level 1 Name (e.g. State or Province)': 'Ime prvog nivoa hijerarhije (npr. savezna država/republika/pokrajina)', 'Hierarchy Level 2 Name (e.g. District or County)': 'Ime drugog nivoa hijerarhije (npr. kanton/regija)', 'Hierarchy Level 3 Name (e.g. City / Town / Village)': 'Ime trećeg nivoa hijerarhije (npr. grad/opština/selo)', 'Hierarchy Level 4 Name (e.g. Neighbourhood)': 'Ime četvrtog nivoa hijerarhije (npr. 
susjedstvo/zaseok)', 'Hierarchy Level 5 Name': 'Ime petog nivoa hijerarhije', 'high': 'visoko', 'High': 'Visok', 'High Tide Depth': 'Dubina visoke plime', 'High Water': 'Najveći vodostaj', 'Highest Priority Open Requests': 'Najviši prioritet za otvorene zahtjeve', 'highly critical': 'Vrlo kritično', 'History': 'Istorija', 'Hit the back button on your browser to try again.': "Stisnite 'Nazad' na vašem pretraživaču da pokušte ponovo.", 'Holiday Address': 'Adresa za vrijeme odmora', 'Home': 'Početak', 'Home added': 'Kuća dodana', 'Home Address': 'Kućna adresa', 'Home City': 'Grad stanovanja', 'Home Country': 'Matična država', 'Home Crime': 'Kućni kriminal', 'Home deleted': 'Kuća izbrisana', 'Home Details': 'Kućni detalji', 'Home Phone': 'Kućni telefon', 'Home phone': 'Kućni telefon', 'Home Phone Number': 'Kućni telefon', 'Home Relative': 'Kućni srodnik', 'Home updated': 'Ažuriran dom', 'Homes': 'Kuće', 'horizontal': 'horizontalno', 'Hospital': 'Bolnica', 'Hospital Details': 'Pojedinosti bolnice', 'Hospital information added': 'Dodana informacija o bolnici', 'Hospital information deleted': 'Obrisana informacija o bolnici', 'Hospital information updated': 'Ažurirana informacija o bolnici', 'Hospital Management': 'Upravljanje bolnicom', 'Hospital status assessment.': 'Procjena stanja bolnice.', 'Hospital Status Report': 'Izvještaj o statusu bolnice', 'Hospitals': 'Bolnice', 'Host': 'Domaćin', 'Hot Spot': 'Kritična tačka', 'Hour': 'Sat', 'Hourly': 'Svaki sat', 'hourly': 'svaki sat', 'hours': 'sati', 'Hours': 'Sati', 'Hours added': 'Sati dodani', 'Hours by Program Report': 'Sati po programskom izvještaju', 'Hours by Role Report': 'Sati po izvještaju o ulogama', 'Hours deleted': 'Sati obrisani', 'Hours Details': 'Detalji sati', 'Hours updated': 'Sati ažurirani', 'Household kits received': 'Kompleti za domaćinstva primljeni', 'Household kits, source': 'Kućanski kompleti, izvor', 'households': 'domaćinstva', 'How data shall be transferred': 'Kako podaci trebaju biti preneseni', 'How did boys 13-17yrs spend most of their time prior to the disaster?': 'Kako su dječaci 13-17 god. provodili većinu vremena prije katastrofe?', 'How did boys <12yrs spend most of their time prior to the disaster?': 'Kako su dječaci <12 god. provodili većinu vremena prije katastrofe?', 'How did boys girls 13-17yrs spend most of their time prior to the disaster?': 'Kako su mladići i djevojke 13-17 godina provodili većinu vremena prije katastrofe', 'How did girls <12yrs spend most of their time prior to the disaster?': 'Kako su djevojčice <12 god. provodili većinu vremena prije katastrofe?', 'How do boys 13-17yrs spend most of their time now?': 'Kako mladići 13-17 godina sada provode većinu vremena?', 'How do boys <12yrs spend most of their time now?': 'Kako dječaci <12yrs sada provode većinu vremena?', 'How do girls 13-17yrs spend most of their time now?': 'Kako djevojke 13-17 godina sada provode većinu vremena?', 'How do girls <12yrs spend most of their time now?': 'Kako djevojčice <12yrs sada provode većinu vremena?', 'How does it work?': 'Kako ovo radi?', 'How is this person affected by the disaster? (Select all that apply)': 'Kako je osoba pogođena katastrofom? 
(Odaberite sve što se može primjeniti)', 'How local records shall be updated': 'Kako se lokalni zapisi trebaju ažurirati', 'How long will the food last?': 'Koliko dugo će hrana trajati?', 'How long will this water resource last?': 'Koliko dugo će ovaj resurs vode trajati?', 'How many Boys (0-17 yrs) are Dead due to the crisis': 'Koliko dječaka (starosti od 0 do 17 god) je mrtvo usljed trenutne krize', 'How many Boys (0-17 yrs) are Injured due to the crisis': 'Koliko dječaka (0-17 godina) je povrijeđeno zbog krize', 'How many Boys (0-17 yrs) are Missing due to the crisis': 'Koliko dječaka (0 - 17 godina) je nestalo tokom krize', 'How many days will the supplies last?': 'Koliko dana će trajati zalihe?', 'How many doctors in the health centers are still actively working?': 'Koliko ljekara u zdravstvenim centrima još aktivno radi?', 'How many Girls (0-17 yrs) are Dead due to the crisis': 'Koliko djevojaka (0-17godina) je umrlo tokom ove krize', 'How many Girls (0-17 yrs) are Injured due to the crisis': 'Koliko djevojki (0-17 god.) je ozlijeđeno zbog nepogode', 'How many Girls (0-17 yrs) are Missing due to the crisis': 'Koliko djevojčica (0-17 god) je nestalo uslijed krize', 'How many houses suffered damage but remain usable (usable = windows broken, cracks in walls, roof slightly damaged)?': 'Koliko je kuća oštećeno ali još upotrebljivo (upotrebljivo = razbijeni prozori, pukotine u zidu, krov malo oštećen)?', 'How many latrines are available in the village/IDP centre/Camp?': 'Koliko zahoda je dostupan u selu/centru/kampu?', 'How many Men (18 yrs+) are Dead due to the crisis': 'Koliko muškaraca (18+ godina) je umrlo zbog krize', 'How many Men (18 yrs+) are Injured due to the crisis': 'Koliko muškaraca (preko 18 god.) je povrijeđeno usljed krize', 'How many Men (18 yrs+) are Missing due to the crisis': 'Koliko muškaraca (18 god+) je nestalo uslijed krize', 'How many midwives in the health centers are still actively working?': 'Koliko babica u zdravstvenim centrima još aktivno radi?', 'How many new cases have been admitted to this facility in the past 24h?': 'Koliko novih slučajeva je primljeno u ovaj objekat u posljednjih 24h?', 'How many nurses in the health centers are still actively working?': 'Koliko medicinskih sestara u zdravstvenim centrima još aktivno radi?', 'How many of the patients with the disease died in the past 24h at this facility?': 'Koliko pacijenata sa ovom bolesti je umrlo u posljednjih 24h u ovom objektu?', 'How many of the primary school age boys (6-12) in the area are not attending school?': 'Koliko dječaka osnovaca (6-12) u ovom području ne pohađaju školu?', 'How many of the primary school age girls (6-12) in the area are not attending school?': 'Koliko djevojčica osnovaca (6-12) u ovom području ne pohađaju školu?', 'How many of the secondary school age boys (13-18) in the area are not attending school?': 'Koliko srednjoškolskih mladića (13-18) u ovom području ne pohađaju školu?', 'How many of the secondary school age girls (13-18) in the area are not attending school?': 'Koliko srednjoškolskih djevojaka (13-18) u ovom području ne pohađaju školu?', 'How many patients with the disease are currently hospitalized at this facility?': 'Koliko pacijenata sa tom bolesti je trenutno hospitalizovano u ovom objektu?', 'How many primary school age boys (6-12) are in the affected area?': 'Koliko dječaka osnovaca (6-12) je u pogođenom području?', 'How many primary school age girls (6-12) are in the affected area?': 'Koliko djevojčica osnovaca (6-12) je u pogođenom području?', 'How 
many secondary school age girls (13-18) are in the affected area?': 'Koliko srednjoškolskih djevojaka (13-18) je u pogođenom području?', 'How many teachers have been affected by the disaster (affected = unable to work)?': 'Koliko je nastavnika pogođeno katastrofom? (pogođeno = ne može raditi)', 'How many teachers worked in the schools prior to the disaster?': 'Koliko je nastavnika radilo u školi prije katastrofe?', 'How many Women (18 yrs+) are Dead due to the crisis': 'Koliki broj Žena (od 18 godina i više) je poginulo tokom krize', 'How many Women (18 yrs+) are Injured due to the crisis': 'Koliko žena (18+ godina) je povrijeđeno uslijed krize', 'How many Women (18 yrs+) are Missing due to the crisis': 'Koliko žena (18 godina+) je nestalo uslijed krize', 'How much detail is seen. A high Zoom level means lot of detail, but not a wide area. A low Zoom level means seeing a wide area, but not a high level of detail.': 'Koliko detalja je vidljivo. Velik nivo zumiranja znači mnogo detalja, ali ne i široku oblast prikaza. Nizak nivo zumiranja znači prikaz široke oblasti, ali ne i visok nivo detalja.', 'How often you want to be notified. If there are no changes, no notification will be sent.': 'Koliko često želite biti obavještavani. Ako nema izmjena, napomene se neće slati.', 'How you want to be notified.': 'Kako želite biti obavještavani', 'HR Manager': 'Menadžer ljudskih resursa', 'HTML class': 'HTML klasa', 'Human Resource': 'Ljudski resurs', 'Human Resource added': 'Ljudski resurs dodan', 'Human Resource assigned': 'Dodijeljeni ljudski resursi', 'Human Resource Assignment updated': 'Dodjela ljudskih resursa ažurirana', 'Human Resource Assignments': 'Dodjeljivanje ljudskih resursa', 'Human Resource Details': 'Detalji ljudskih resursa', 'Human Resource Management': 'Rukovodstvo ljudskim resursima', 'Human Resource removed': 'Ljudski resurs uklonjen', 'Human Resource unassigned': 'Nedodijeljeni ljudski resursi', 'Human Resource updated': 'Ažuriran ljudski resurs', 'Human Resources': 'Ljudski resursi', 'Human Resources Management': 'Upravljanje ljudskim resursima', 'Humanitarian NGO': 'Humanitarna NVO', 'Hungary': 'Mađarska', 'Hurricane': 'Uragan', 'Hurricane Force Wind': 'Vjetar snage uragana', 'Hybrid Layer': 'Hibridni sloj', 'Hygiene': 'Higijena', 'Hygiene kits received': 'Primljeni higijenski kompleti', 'Hygiene kits, source': 'Higijenski kompleti, izvor', 'Hygiene NFIs': 'Higijenski neprehrambeni artikli', 'Hygiene practice': 'higijenska praksa', 'Hygiene problems': 'Higijenski problemi', 'Hygiene promotion': 'Unapređenje higijene', 'I accept. Create my account.': 'Prihvatam.
Kreiraj moj račun.', 'I agree to the %(terms_of_service)s': 'Slažem se sa %(terms_of_service)s', 'I am available in the following area(s)': 'Dostupan sam u sljedećim područjima', 'Ice Pressure': 'Pritisak leda', 'Iceberg': 'Santa leda', 'Iceland': 'Island', 'ICT': 'IKT', 'ID': 'IB', 'ID Label': 'ID oznaka', 'ID Label:': 'ID oznaka:', 'ID Tag': 'ID oznaka', 'ID Tag Number': 'Broj identifikacijske kartice', 'ID type': 'ID tip', 'Identificación de Víctimas de Desastres': 'Identifikacija žrtava katastrofe', 'Identification': 'Identifikacija', 'Identification label of the Storage bin.': 'Identifikacija korpe za smještaj', 'Identification Report': 'Izvještaj o identifikaciji', 'Identification Reports': 'Izvještaji identifikacija', 'Identification Status': 'Status identifikacije', 'identified': 'identificiran', 'Identified as': 'Identifikovano kao', 'Identified by': 'Identifikovan od strane', 'Identifier Name for your Twilio Account.': 'Ime identifikatora za vaš Twilio nalog.', 'Identifier which the remote site uses to authenticate at this site when sending synchronization requests.': 'Identifikator koji će udaljeni sajt koristiti za provjeru prijave na ovaj sajt kada šalje zahtjeve za sinhronizacijom.', 'Identities': 'Identiteti', 'Identity': 'Identitet', 'Identity added': 'Identitet dodan', 'Identity deleted': 'Identitet obrisan', 'Identity Details': 'Detalji o identitetu', 'Identity updated': 'Identitet ažuriran', 'IEC Materials': 'IEC materijali', 'If a ticket was issued then please provide the Ticket ID.': 'Ako je kartica izdata molimo vas da obezbijedite ID kartice', 'If a user verifies that they own an Email Address with this domain, the Approver field is used to determine whether & by whom further approval is required.': 'Ako korisnik potvrdi da posjeduje e-mail adresu ove domene, polje odobravatelja će se koristiti da definira da li i od strane koga se traže daljnje potvrde.', 'If checked, the notification will contain all modified records. If not checked, a notification will be send for each modified record.': 'Ako je označeno, napomena će sadržati sve izmijenjene zapise. 
Ako nije označeno, napomena će biti poslana za svaki izmijenjeni zapis.', 'If it is a URL leading to HTML, then this will downloaded.': 'Ako URL vodi ka HTML-u, ovo će biti preuzeto.', 'If neither are defined, then the Default Marker is used.': 'Ako nijedan nije definisan, onda je korišten Podrazumjevani Znak', 'If no marker defined then the system default marker is used': 'Ako nema definisanog markera onda se koristi standardni marker sistema', 'If no, specify why': 'Ako ne, navedite zašto', 'If none are selected, then all are searched.': 'Ako nijedan nije označen, svi će biti pretraženi.', 'If not found, you can have a new location created.': 'Ako nije nađeno, možete kreirati novu lokaciju.', "If selected, then this Asset's Location will be updated whenever the Person's Location is updated.": 'Ako je odabrano, lokacija ovog sredstva će biti ažurirana kada se kod ažurira lokacija osobe', 'If the location is a geographic area, then state at what level here.': 'Ako je lokacija geografsko područje, navedite na kojem je nivou.', 'If the person counts as essential staff when evacuating all non-essential staff.': 'Ako se osoba računa kao neohodno osoblje pri evakuaciji svog osoblja koje nije neophodno.', 'If the request is for %s, please enter the details on the next screen.': 'Ako je zahtjev za %s, unesite detalje na sljedećem ekranu.', 'If the request type is "Other", please enter request details here.': 'Ako je tip zahtjev "Drugi", unesite detalje zahtjeva ovdje.', 'If the service requries HTTP BASIC Auth (e.g. Mobile Commons)': 'Ako usluga zahtijeva HTTP BASIC Autorizaciju (npr. Mobile Commons)', 'If there are multiple configs for a person, which should be their default?': 'Ako ima više konfiguracija za jednu osobu, koja treba biti podrazumijevana?', "If this configuration is displayed on the GIS config menu, give it a name to use in the menu. The name for a personal map configuration will be set to the user's name.": 'Ako je ova konfiguracija prikazana na GIS konfiguracijskom meniju, dajte ime da se koristi u meniju. Ime za ličnu konfiguraciju mape će se koristiti za korisničko ime.', "If this configuration represents a region for the Regions menu, give it a name to use in the menu. The name for a personal map configuration will be set to the user's name.": 'Ako ova konfiguracija predstavlja lokalitet za Izbornik Lokaliteta, postavite naziv da biste je koristili u izborniku. 
Kao naziv za ličnu konfiguraciju mape će biti postavljeno ime korisnika.', "If this field is populated then a user who specifies this Organization when signing up will be assigned as a Staff of this Organization unless their domain doesn't match the domain field.": 'Ako je ovo polje popunjeno onda će korisniku koji specificira ovu organizaciju pri upisu biti osoblje organizacije osim ako se njegovo područje ne podudara sa područjem polja.', 'If this field is populated then a user with the Domain specified will automatically be assigned as a Staff of this Organization': 'Ako je ovo polje popunjeno tada korisnik sa navedenom domenom će biti automatski dodijeljen kao osoblje ove organizacije', 'If this is a request template to be added repeatedly then the schedule can be set on the next page.': 'Ako je ovo predložak za zahtjev koji će se dodati iznova, onda se raspored može postaviti na sljedećoj strani', 'If this is set to True then mails will be deleted from the server after downloading.': 'Ako je ovo uključeno, tada će poruke elektronske pošte biti obrisane sa servera nakon preuzimanja', "If this is ticked, then this will become the user's Base Location & hence where the user is shown on the Map": 'Ako je ovo označeno, onda će ovo postati korisnikova osnovna lokacija i samim time lokacija na kojoj će korisnik biti prikazan na mapi.', 'If this record should be restricted then select which role is required to access the record here.': 'Ako bi ovaj zapis trebao biti ograničen, ovdje odaberite kojoj ulozi je dozvoljen pristup zapisu.', 'If this record should be restricted then select which role(s) are permitted to access the record here.': 'Ako je ovaj zapis ograničen, označiti kojim ulogama je dozvoljen pristupovom zapisu', 'If Unit = m, Base Unit = Km, then multiplicator is 0.0001 since 1m = 0.001 km.': 'Ako je Unit = m, Base Unit = Km, tada je multiplikator is 0.0001 jer je 1m = 0.001 km.', 'If yes, specify what and by whom': 'Ako da, navedite šta i od strane koga', 'If yes, which and how': 'Ako jeste, koji i kako', 'If you do not enter a Reference Document, your email will be displayed to allow this data to be verified.': 'Ako ne unesete odgovarajući dokument, vaš e-mail će biti prikazan kako mogli potvrditi ove podatke.', "If you don't see the activity in the list, you can add a new one by clicking link 'Create Activity'.": "Ukoliko aktivnost ne vidite u popisu, možete dodati novu klikom na link 'Kreiraj aktivnost'.", "If you don't see the asset in the list, you can add a new one by clicking link 'Create Asset'.": "Ukoliko sredstvo ne vidite u popisu, možete dodati novo klikom na link 'Kreiraj sredstvo'.", "If you don't see the beneficiary in the list, you can add a new one by clicking link 'Add Beneficiaries'.": 'Ukoliko ne vidite korisnika u popisu, možete dodati novu tako što ćete kliknuti na link "Dodaj korisnika"', "If you don't see the campaign in the list, you can add a new one by clicking link 'Add Campaign'.": 'Ukoliko ne vidite kampanju u popisu, možete dodati novu tako što ćete kliknuti na link "Dodaj kampanju"', "If you don't see the Cluster in the list, you can add a new one by clicking link 'Add New Cluster'.": 'Ukoliko ne vidite skup u popisu, možete dodati novu tako što ćete kliknuti na link "Dodaj novi skup"', "If you don't see the community in the list, you can add a new one by clicking link 'Create Community'.": "Ukoliko zajednicu ne vidite u popisu, možete dodati novu klikom na link 'Kreiraj zajednicu'.", "If you don't see the Hospital in the list, you can add a new 
one by clicking link 'Add Hospital'.": "Ukoliko bolnicu ne vidite u popisu, možete dodati novu klikom na link 'Dodaj bolnicu'.", "If you don't see the Hospital in the list, you can add a new one by clicking link 'Create Hospital'.": "Ukoliko bolnicu ne vidite u popisu, možete dodati novu klikom na link 'Kreiraj bolnicu'.", "If you don't see the location in the list, you can add a new one by clicking link 'Create Location'.": 'Ukoliko ne vidite lokaciju u popisu, možete dodati novu tako što ćete kliknuti na link "Kreiraj lokaciju"', "If you don't see the Office in the list, you can add a new one by clicking link 'Add Office'.": "Ukoliko ne vidite ured na listi, možete dodati novi klikom na link 'Dodaj ured'", "If you don't see the Organization in the list, you can add a new one by clicking link 'Add Organization'.": 'Ukoliko ne vidite organizaciju u popisu, možete dodati novu tako što ćete kliknuti na link "Dodaj Organizaciju"', "If you don't see the Organization in the list, you can add a new one by clicking link 'Create Organization'.": 'Ukoliko ne vidite organizaciju u popisu, možete dodati novu tako što ćete kliknuti na link "Kreiraj Organizaciju"', "If you don't see the project in the list, you can add a new one by clicking link 'Create Project'.": "Ukoliko projekt ne vidite u popisu, možete dodati novi klikom na link 'Kreiraj projekt'.", "If you don't see the Sector in the list, you can add a new one by clicking link 'Create Sector'.": "Ukoliko ne vidite sektor na listi, možete dodati novi klikom na link 'Kreiraj sektor'", "If you don't see the Type in the list, you can add a new one by clicking link 'Add Region'.": "Ukoliko ne vidite tip na listi, možete dodati novi klikom na link 'Dodaj region'", "If you don't see the type in the list, you can add a new one by clicking link 'Create Activity Type'.": "Ukoliko ne vidite tip na listi, možete dodati novi klikom na link 'Kreiraj tip aktivnosti'", "If you don't see the Type in the list, you can add a new one by clicking link 'Create Facility Type'.": "Ukoliko ne vidite tip na listi, možete dodati novi klikom na link 'Kreiraj tip objekta'", "If you don't see the Type in the list, you can add a new one by clicking link 'Create Office Type'.": "Ukoliko ne vidite tip na listi, možete dodati novi klikom na link 'Kreiraj tip kancelarije'", "If you don't see the Type in the list, you can add a new one by clicking link 'Create Organization Type'.": 'Ukoliko ne vidite tip u popisu, možete dodati novi tako što ćete kliknuti na link "Kreiraj tip organizacije"', "If you don't see the vehicle in the list, you can add a new one by clicking link 'Add Vehicle'.": 'Ukoliko ne vidite vozilo u popisu, možete dodati novu tako što ćete kliknuti na link "Dodaj vozilo"', "If you enter a foldername then the layer will appear in this folder in the Map's layer switcher. A sub-folder can be created by separating names with a '/'": "Ako unesete ime diektorija, onda će se sloj pojaviti u tom direktoriju u prebacivaču sloja mape. 
Poddirektorij se može kreirati razdvajanjem imena s '/'", 'If you know what the Geonames ID of this location is then you can enter it here.': 'Ako znate Geonames ID ove lokacije, možete ga unijeti ovdje.', 'If you know what the OSM ID of this location is then you can enter it here.': 'Ako znate OSM ID ove lokacije, možete ga unijeti ovdje.', 'If you need to add a new document then you can click here to attach one.': 'Ako vam je potrebno da dodate novi dokument onda kliknite ovdje kako biste dodali jedan.', "If you specify a module then this will be used as the text in that module's index page": 'Ako navedete modul, ovo će se koristiti kao tekst u indeksnoj stranici modula', "If you specify a resource then this will be used as the text in that resource's summary page": 'Ako navedete resurs, ovo će se koristiti kao tekst u sumarnoj stranici tog resursa', 'If you want several values, then separate with': 'Ukoliko želite više vrijednosti, onda razdvojite sa', 'If you would like to help, then please': 'Ako želite pomoći, samo izvolite', 'If you would like to help, then please %(sign_up_now)s': 'Ako želite pomoći, onda %(sign_up_now)s', 'ignore': 'zanemari', 'Ignore Errors?': 'Ignoriši greške?', 'Illegal Immigrant': 'Ilegalni doseljenik', 'Image': 'Slika', 'Image added': 'Slika dodana', 'Image deleted': 'Slika obrisana', 'Image Details': 'Detalji slike', 'Image File(s), one image per page': 'Datoteka (datoteke) slika, prikaz jedne slike po stranici', 'Image Tags': 'Oznake na slikama', 'Image Type': 'Vrsta slike', 'Image updated': 'Slika ažurirana', 'Image Upload': 'Postavi sliku', 'Image/Other Attachment': 'Slika/Drugi dodaci', 'Imagery': 'Lik', 'Images': 'Slike', 'Immediate reconstruction assistance, Rank': 'Pomoć u hitnoj rekonstrukciji, stepen', 'Immediately': 'Odmah', 'Immigration and Customs Capabilities': 'Carinske i imigracione mogućnosti', 'Impact added': 'Utjecaj dodat', 'Impact Assessments': 'Procjene utjecaja', 'Impact deleted': 'Utjecaj obrisan', 'Impact Details': 'Detalji utjecaja', 'Impact Type': 'Tip utjecaja', 'Impact Type added': 'Dodan tip utjecaja', 'Impact Type deleted': 'Tip utjecaja obrisan', 'Impact Type Details': 'Detalji o tipu utjecaja', 'Impact Type updated': 'Ažurirana vrsta utjecaja', 'Impact Types': 'Tipovi utjecaja', 'Impact updated': 'Utjecaj ažuriran', 'Impacts': 'Utjecaji', 'implanted': 'implantat', 'import': 'uvoz', 'Import': 'Uvoz', 'Import & Export Data': 'Uvoz i izvoz podataka', 'Import Activity Data': 'Uvezi podatke aktivnosti', 'Import Activity Type data': 'Uvezi podatke tipa aktivnosti', 'Import Airports': 'Uvezi aerodrome', 'Import and Export': 'Uvoz i izvoz', 'Import Annual Budget data': 'Uvezi podatke godišnjeg budžeta', 'Import Assets': 'Uvezi sredstva', 'Import Awards': 'Uvezi nagrade', 'Import Base Stations': 'Uvezi bazne stanice', 'Import Catalog Items': 'Uvezi stavke kataloga', 'Import Certificates': 'Uvezi certifikate', 'Import Community Data': 'Uvezi podatke zajednice', 'Import Completed Assessment Forms': 'Uvezi završen formular ocjene', 'Import Contacts': 'Uvezi kontakte', 'Import Courses': 'Uvezi kurseve', 'Import Data': 'Uvezi podatke', 'Import Data for Theme Layer': 'Uvezi podatke za tematski sloj', 'Import Departments': 'Uvezi odjeljenja', 'Import Event Types': 'Uvezi tipove događaja', 'Import Facilities': 'Uvezi objekte', 'Import Facility Types': 'Uvezi vrste objekata', 'Import File': 'Uvezi datoteku', 'Import File deleted': 'Unosna datoteka izbrisana', 'Import File Details': 'Uvezi detalje datoteke', 'Import Files': 'Uvezi
datoteke', 'Import from CSV': 'Uvezi iz CSV', 'Import from OpenStreetMap': 'Uvezi iz OpenStreetMap', 'Import from Ushahidi Instance': 'Importuj iz Ushahidi instance', 'Import Hazard data': 'Uvezi podatke o riziku', 'Import Hazards': 'Uvezi rizike', 'Import Heliports': 'Uvezi heliodrome', 'Import Hours': 'Uvezi sate', 'Import if Master': 'Uvezi ako je Master', 'Import Incident Reports': 'Uvezi izvještaje o incidentu', 'Import Incident Reports from Ushahidi': 'Uvezi izvještaj o incidentu iz Ushahidi', 'Import Incident Types': 'Uvezi tipove incidenta', 'Import Job': 'Uvezi posao', 'Import Job Count': 'Broj poslova uvoza', 'Import job created': 'Posao za uvoz kreiran', 'Import Jobs': 'Uvezi poslove', 'Import Layers': 'Uvezi slojeve', 'Import Location data': 'Uvezi podatke lokacije', 'Import Location Data': 'Uvezi podatke lokacije', 'Import Locations': 'Uvezi lokacije', 'Import Logged Time data': 'Uvezi zabilježene vremenske podatke', 'Import multiple tables as CSV': 'Uvoz više tabela kao CSV', 'Import New File': 'Uvezi novu datoteku', 'Import Offices': 'Uvezi kancelarije', 'Import Organizations': 'Uvezi organizacije', 'Import Participant List': 'Uvezi listu učesnika', 'Import Participants': 'Uvezi učesnike', 'Import Partner Organizations': 'Uvezi partnerske organizacije', 'Import PoI Types': 'Uvezi tipove tačaka interesa', 'Import Points of Interest': 'Uvezi tačke interesa', 'Import Policies & Strategies': 'Uvezi politiku ili strategiju', 'Import Posts': 'Uvezi blok ugradivog teksta', 'Import Project Organizations': 'Uvezi organizacije projekta', 'Import Projects': 'Uvezi projekte', 'Import Resource Types': 'Uvezi tipove resursa', 'Import Resources': 'Uvezi resurse', 'Import Seaports': 'Uvezi luke', 'Import Sector data': 'Uvezi podatke o sektoru', 'Import Series': 'Uvezi serije', 'Import Service data': 'Uvezi podatke usluge', 'Import Services': 'Uvezi usluge', 'Import Staff': 'Uvezi osoblje', 'Import Suppliers': 'Uvezi dobavljače', 'Import Tags': 'Uvezi oznake', 'Import Tasks': 'Uvezi zadatke', 'Import Template Layout': 'Uvezi raspored predložaka', 'Import Templates': 'Uvezi predloške', 'Import Theme data': 'Uvezi podatke teme', 'Import Training Events': 'Uvezi događaje obuke', 'Import Training Participants': 'Uvezi učesnike obuke', 'Import Users': 'Uvezi korisnike', 'Import Volunteer Cluster Positions': 'Uvezi pozicije skupa volontera', 'Import Volunteer Cluster Types': 'Uvezi tipove skupa volontera', 'Import Volunteer Clusters': 'Uvezi skupove volontera', 'Import Volunteers': 'Uvezi volontere', 'Import Warehouse Stock': 'Uvezi zalihu skladišta', 'Import Warehouses': 'Uvezi skladišta', 'Import/Export': 'Uvoz/Izvoz', 'Import/Master': 'Uvezi/Master', 'Important': 'Važno', 'Importantly where there are no aid services being provided': 'Važnije gdje nije pružena pomoć', 'Imported': 'Uvezeno', 'Importing data from spreadsheets': 'Unošenje podataka iz tabela', 'Improper decontamination': 'Nepravilna dekontaminacija', 'Improper handling of dead bodies': 'Nepravilno postupanje sa mrtvim tijelima', 'improvement': 'poboljšanje', 'In': 'U', 'In Catalogs': 'U katalozima', 'in Deg Min Sec format': 'u Stepeni Minute Sekunde formatu', 'In error': 'Greška', 'In GeoServer, this is the Layer Name. Within the WFS getCapabilities, this is the FeatureType Name part after the colon(:).': 'U GeoServer, ovo je ime sloja. Unutar WFS getCapabilities, ovo je dio sa FeatureType imenom nakon dvotačke(:).', 'In GeoServer, this is the Workspace Name.
Within the WFS getCapabilities, this is the FeatureType Name part before the colon(:).': 'U GeoServer, ovo je ime radnog prostora. Unutar WFS getCapabilities, ovo je dio sa FeatureType imenom prije dvotačke(:).', 'in GPS format': 'U GPS formatu', 'in Inv.': 'u Inv.', 'In Inventories': 'U zalihama', 'In order to be able to edit OpenStreetMap data from within %(name_short)s, you need to register for an account on the OpenStreetMap server.': 'Da možete mijenjati OpenStreetMap podatke iz %(name_short)s, trebate registrovati nalog na OpenStreetMap serveru.', 'In Process': 'U procesu', 'In Progress': 'U Toku', 'In Stock': 'Na zalihi', 'in Stock': 'na zalihi', 'In transit': 'U prijelazu', 'In Window layout the map maximises to fill the window, so no need to set a large value here.': 'U rasporedu prozora karta se maksimizira da popuni prozor, nema potrebe da se ovdje postavlja velika vrijednost.', 'inactive': 'neaktivno', 'Inbound Mail Settings': 'Podešavanja ulaznih mail-ova', 'InBox': 'Dolazna pošta', 'Incident': 'Slučaj', 'Incident added': 'Dodat incident', 'Incident Categories': 'Kategorije incidenata', 'Incident Commander': 'Komandir incidenata', 'Incident Details': 'Detalji o incidentu', 'Incident removed': 'Incident uklonjen', 'Incident Report': 'Izvještaj o incidentu', 'Incident Report added': 'Dodat izvještaj o incidentu', 'Incident Report deleted': 'Obrisan izvještaj o incidentu', 'Incident Report Details': 'Detalji o izvještaju incidenta', 'Incident Report removed': 'Uklonjen izvještaj o incidentu', 'Incident Report updated': 'Ažuriran izvještaj o incidentu', 'Incident Reporting': 'Izvještavanje o incidentu', 'Incident Reporting System': 'Sistem za izvještavanje o incidentima', 'Incident Reports': 'Izvještaji o incidentu', 'Incident Timeline': 'Vremenski tok incidenta', 'Incident Type': 'Tip incidenta', 'Incident Type added': 'Vrsta incidenta dodana', 'Incident Type Details': 'Detalji o vrsti incidenta', 'Incident Type removed': 'Tip incidenta obrisan', 'Incident Type updated': 'Vrsta incidenta ažurirana', 'Incident Types': 'Tipovi incidenta', 'Incident updated': 'Ažuriran incident', 'Incidents': 'Incidenti', 'Include any special requirements such as equipment which they need to bring.': 'Uključite bilo koje posebne zahtjeve kao npr.
opremu koju trebaju donijeti.', 'Include core files': 'Uključi osnovne datoteke', 'Include Entity Information?': 'Uključi informaciju o jedinki?', 'Include only items purchased within the specified dates.': 'Uključi samo stavke kupljene unutar navedenih datuma.', 'Include only items that expire within the specified dates.': 'Uključi samo stavke koje ističu unutar navedenih datuma.', 'Include only items where quantity is in this range.': 'Uključi samo stavke čija je količina unutar navedenog opsega.', "includes a GroundOverlay or ScreenOverlay which aren't supported in OpenLayers yet, so it may not work properly.": 'Uključuje GroundOverlay ili ScreenOverlay koji još nisu podržani u OpenLayers, tako da možda neće raditi ispravno.', 'Incoming': 'Dolazni', 'Incoming Shipment canceled': 'Dolazna pošiljka otkazana', 'Incoming Shipment updated': 'Dolazna pošiljka je ažurirana', 'Incoming Shipments': 'Dolazne pošiljke', 'Incomplete': 'Nepotpuno', 'Incorrect parameters': 'Nevažeći parametri', 'India': 'Indija', 'Individuals': 'Pojedinci', 'Indonesia': 'Indonezija', 'Industrial': 'Industrijska', 'Industrial Crime': 'Industrijski kriminal', 'Industry close to village/camp': 'Industrija u blizini sela/kampa', 'Industry Fire': 'Industrijska vatra', 'Infant (0-1)': 'Novorođenče (0-1)', 'Infectious Disease': 'Infektivne bolesti', 'Infectious Disease (Hazardous Material)': 'Zarazna bolest (Opasan materijal)', 'Infectious Diseases': 'Zarazne bolesti', 'Infestation': 'Napast', 'Informal camp': 'Neformalni kamp', 'Informal Leader': 'Neformalni vođa', 'Information gaps': 'Praznine u informacijama', 'Information Source': 'Izvor informacije', 'Infusion catheters available': 'Sonde za infuziju dostupne', 'Infusion catheters need per 24h': 'Infuzioni kateteri potrebni u 24h', 'Infusion catheters needed per 24h': 'Infuzijski kateteri potrebni po 24h', 'Infusions available': 'Dostupne infuzije', 'Infusions needed per 24h': 'Infuzija potebna u 24h', 'Inherited?': 'Naslijeđeni?', 'initial assessment': 'Početna procjena:', 'Initials': 'Inicijali', 'injured': 'povrijeđeni', 'Injuries': 'Povrede', 'input': 'ulaz', 'Input Job': 'Ulazni posao', 'insert new': 'Ubaci novi', 'insert new %s': 'dodaj novi %s', 'Inspected': 'Pregledano', 'Inspection Date': 'Datum Inspekcije', 'Inspection date and time': 'Datum i vrijeme inspekcije', 'Inspection time': 'Vrijeme inspekcije ili pregleda', 'Inspector ID': 'ID inspektora', 'Instance Type': 'Tip instance', 'Instance URL': 'URL instance', 'Instant Porridge': 'Instant supa', "Instead of automatically syncing from other peers over the network, you can also sync from files, which is necessary where there's no network. You can use this page to import sync data from files and also export data to sync files. Click the link on the right to go to this page.": 'Umjesto automatske sinhronizacije sa ostalih tačaka mreže, možete izvršiti sinhronizaciju preko datoteka, što je neophodno na mjestima gdje nema mreže. Možete koristiti ovu stranicu da uvezete sinhronizacijske podatke iz datoteka, kao i da izvezete podatke u sinhronizacijske datoteke. 
Kliknite na link desno da biste otišli na ovu stranicu.', 'Institution': 'Institucija', 'Instructor': 'Instruktor', 'Instrument Landing System': 'Instrumentalni sistem za slijetanje', 'Insufficient': 'Nedovoljno', 'insufficient number of pages provided': 'naveden nedovoljan broj strana', 'Insufficient Privileges': 'Nedovoljno ovlasti', 'Insufficient privileges': 'Nedovoljno ovlasti', 'Insufficient vars: Need module, resource, jresource, instance': 'Nedovoljan broj promjenjivih: potrebni su modul, resurs, jresurs, instanca', 'Insurance': 'Osiguranje', 'Insurance Renewal Due': 'Rok za obnovu osiguranja', 'Intake Items': 'Ulazne stavke', 'Intergovernmental': 'Međuvladina', 'Intergovernmental Organisation': 'Međuvladina organizacija', 'Intergovernmental Organization': 'Međuvladina Organizacija', 'Interior walls, partitions': 'Unutarnji zidovi, pregrade', 'Internal Features': 'Interne karakteristike', 'Internal Shipment': 'Interne pošiljke', 'Internal State': 'Unutrašnje stanje', 'International NGO': 'Međunarodna NVO', 'International Organization': 'Međunarodna organizacija', 'International Staff': 'Međunarodno osoblje', 'Intervention': 'Intervencija', 'Interview taking place at': 'intervju se održava u', 'invalid': 'neispravno', 'Invalid': 'Nevažeće', 'Invalid data: record %(id)s not accessible in table %(table)s': 'Pogrešni podaci: slog %(id)s nije dostupan u tabeli %(table)s', 'Invalid email': 'Neispravan email', 'Invalid form (re-opened in another window?)': 'Pogrešan formular (ponovo otvoren u drugom prozoru?)', 'Invalid Location!': 'Pogrešna lokacija!', 'Invalid Organisation ID!': 'Neispravan ID organizacije.', 'Invalid Organization ID!': 'Neispravan organizacijski ID.', 'Invalid phone number': 'Netačan broj telefona', 'Invalid phone number!': 'Pogrešan broj telefona!', 'Invalid Query': 'Pogrešan upit', 'invalid request': 'nevažeći zahtjev', 'Invalid request!': 'Nevažeći zahtjev!', 'Invalid Site!': 'Pogrešno mjesto!', 'Invalid ticket': 'Nevažeća kartica', 'invalid ticket': 'nevažeća kartica', 'Invalid UUID!': 'Nevažeći JMBG!', 'Inventories': 'Zalihe', 'Inventories with Item': 'Skladišta s stavkama', 'Inventories with Items': 'Skladišta s stavkama', 'Inventory': 'Skladište', 'Inventory Adjustment': 'Prilagođenje skladišta', 'Inventory Adjustment Item': 'Prilagođenje artikala u skladištu', 'Inventory functionality is available for:': 'Funkcionalnost skladišta je dostupna za', 'Inventory Item': 'Stavka - Inventar (popis)', 'Inventory Item added': 'Dodana stavka inventara', 'Inventory Item deleted': 'Stavka skladišta obrisana', 'Inventory Item Details': 'Popis detalja artikala u skladištu', 'Inventory Item updated': 'Ažurirana stavka inventara', 'Inventory Items': 'Stavke skladišta', 'Inventory Items include both consumable supplies & those which will get turned into Assets at their destination.': 'Stavke skladišta uključuju prehrambene artikle kao i one koji će biti pretvoreni u sredstva na njihovim odredištima.', 'Inventory Location': 'Lokacija skladišta', 'Inventory Management': 'Upravljanje zalihama', 'Inventory of Effects': 'Inventar efekata', 'Inventory Stock Position': 'Pozicija zaliha inventara', 'Inventory Store added': 'Dodana stavka inventara', 'Inventory Store Details': 'Popis detalja artikala u skladištu', 'Inventory/Ledger': 'Skladište/knjigovodstvo', 'Iraq': 'Irak', 'Ireland': 'Irska', 'is a central online repository where information on all the disaster victims and families, especially identified casualties, evacuees and displaced people can be stored. 
Information like name, age, contact number, identity card number, displaced location, and other details are captured. Picture and finger print details of the people can be uploaded to the system. People can also be captured by group for efficiency and convenience.': 'je centralno online skladište gdje se mogu čuvati informacije o svim žrtvama nesreće i porodicama, posebno identificiranim gubicima, evakuisanim i raseljenim osobama. Informacije poput imena, godina, kontakt telefona, broja lične karte, trenutnog mjesta boravka i drugih detalja su pohranjene. Slike i otisci prstiju ljudi se mogu učitati u sistem. Ljudi se mogu rasvrstavati po grupama zbog efikasnosti i pogodnosti.', 'Is adequate food and water available for these institutions?': 'Da li je dostupna adekvatna hrana i voda za ove institucije?', 'Is editing level L%d locations allowed?': 'Da li je uređivanje nivoa L%d lokacija dopušteno?', 'is envisioned to be composed of several sub-modules that work together to provide complex functionality for the management of relief and project items by an organization. This includes an intake system, a warehouse management system, commodity tracking, supply chain management, fleet management, procurement, financial tracking and other asset and resource management capabilities': 'Predviđa se da se sastoji od nekoliko pod-modula koji rade zajedno kako bi osigurali složenu funkcionalnost uz pomoć kojih će organizacije lakše upravljati olakšanjima i projektnim predmetima. To uključuje sistem za unos, sistem za upravljanje skladištima, praćenje proizvoda, upravljanje lancem opskrbe, upravljanje voznim parkom, nabavka, financijsko praćenje i druge sposobnosti za upravljanje sredstvima i resursima.', 'Is it safe to collect water?': 'Da li je bezbjedno sakupljanje vode?', 'Is this a strict hierarchy?': 'Da li je ovo stroga hijerarhija?', 'Israel': 'Izrael', 'Issued without Record': 'Izdato bez zapisa', 'Issuing Authority': 'Autoritet (odgovorno lice) za dodjeljivanje resursa', 'It captures not only the places where they are active, but also captures information on the range of projects they are providing in each area.': 'Snima ne samo dio gdje su aktivni ,već također snima podatke u dometu projekata koji pružaju u svakom području.', 'It gives four options: No Sync, Newer Timestamp, Keep All, Replace All': 'Pruža četiri mogućnosti: Bez sinhronizacije, Novija vremenska oznaka, Zarži sve, Zamijeni sve', 'It is built using the Template agreed by a group of NGOs working together as the': 'Izgrađeno koristeći šablon usaglašen od strane grupe NVO radeći zajedno kao', 'Italian': 'Talijanski', 'Italy': 'Italija', 'Item': 'Stavka', 'Item added': 'Stavka dodana', 'Item added to Inventory': 'Stavka je dodana u inventar', 'Item Added to Shipment': 'Dodana Stavka za Pošiljku.', 'Item added to shipment': 'Predmet dodan u pošiljku', 'Item added to stock': 'Stavka dodana u zalihu', 'Item already in budget!': 'Stavka je već u budžetu !', 'Item already in Bundle!': 'Stavka već u paketu!', 'Item already in Kit!': 'Stavka već u kompletu!', 'Item Catalog added': 'Dodata stavka u katalog', 'Item Catalog Categories': 'Kategorije kataloga stavki ', 'Item Catalog Category': 'Kategorija kataloga stavki', 'Item Catalog Category added': 'Dodana kategorija kataloga stavki', 'Item Catalog Category deleted': 'Obrisana kategorija kataloga stavki', 'Item Catalog Category Details': 'Detalji kategorije kataloga stavke', 'Item Catalog Category updated': 'Kategorija kataloga stavke ažurirana', 'Item Catalog deleted': 'Katalog stavki 
izbrisan', 'Item Catalog Details': 'Pojedinosti o katalogu stavki', 'Item Catalog updated': 'Katalog stavki je ažurirana', 'Item Catalogs': 'Katalozi stavki', 'Item Categories': 'Kategorije stavki', 'Item Category': 'Kategorija stavki', 'Item Category added': 'Dodata kategorija stavki', 'Item Category deleted': 'Obrisana kategorija stavki', 'Item Category Details': 'Detalji o kategorijama stavki', 'Item Category updated': 'Ažurirana kategorija stavki', 'Item Code': 'Šifra stavke', 'Item deleted': 'Stavka obrisana', 'Item Details': 'Detalji o predmetu', 'Item name': 'Ime stavke', 'Item Pack added': 'Paket stavki je dodan', 'Item Pack deleted': 'Paket stavki je obrisan', 'Item Pack Details': 'Sadržaj paketa', 'Item Pack updated': 'Paket stavki je ažuriran', 'Item Packet added': 'Dodat paket stavki', 'Item Packet deleted': 'Paket sa stavkama obrisan', 'Item Packet Details': 'Detalji sadržaja paketa', 'Item Packet updated': 'Ažuriran paket stavki', 'Item Packets': 'Paketi stavki', 'Item Packs': 'Paketi sa stavkama', 'Item quantity adjusted': 'Prilagođena količina stavke', 'Item removed from Inventory': 'Stavka uklonjena iz inventara', 'Item Status': 'Status stavke', 'Item Sub-Categories': 'Pod-kategorije stavke', 'Item Sub-Category': 'Podkategorija stavke', 'Item Sub-Category deleted': 'Obrisana podkategorija stavki', 'Item Sub-Category Details': 'Detalji podkategorije stavke', 'Item Sub-Category updated': 'Podkategorija stavke ažurirana', 'Item Tracking Status': 'Status praćenja stavke', 'Item updated': 'Stavka ažurirana', 'Item(s) added to Request': 'Stavke dodane u zahtjev', 'Item(s) deleted from Request': 'Stavke obrisane iz zahtjeva', 'Item(s) updated on Request': 'Stavke ažurirane u zahtjev', 'Item/Description': 'Stavka/opis', 'Items': 'Stavke', 'Items in Category are Vehicles': 'Stavke u kategoriji su vozila', 'Items in Category can be Assets': 'Stavke u kategoriji mogu biti sredstva', 'Items in Request': 'Stavke u zahtjevu', 'Items in Stock': 'Stavke u zalihi', 'Items/Description': 'Stavke/opis', 'Jamaica': 'Jamajka', 'Japanese': 'Japanski', 'Jerry can': 'Jerry može', 'Jew': 'Jevrej', 'Jewish': 'Jevrejski', 'JNAP Priorities': 'JNAP Prioriteti', 'JNAP-1: Strategic Area 1: Governance': 'JNAP-1: Strateško područje 1: Vlada', 'JNAP-2: Strategic Area 2: Monitoring': 'JNAP-2: Strateško područje 2: Praćenje', 'JNAP-3: Strategic Area 3: Disaster Management': 'JNAP-3: Strateško područje 3: Upravljanje u katastrofama', 'JNAP-4: Strategic Area 4: Risk Reduction and Climate Change Adaptation': 'JNAP-4: Strateško područje 4: Smanjenje rizika i prilagođenje na promjene klime', 'Job added': 'Posao dodan', 'Job deleted': 'Posao obrisan', 'Job reactivated': 'Posao ponovo aktiviran', 'Job Role': 'Radno mjesto', 'Job Role added': 'Dodana uloga posla', 'Job Role Catalog': 'Katalog radnih mjesta', 'Job Role deleted': 'Pozicija obrisana', 'Job Role Details': 'Opis uloge posla', 'Job Role updated': 'Ažurirana uloga posla', 'Job Roles': 'Radno mjesto', 'Job Schedule': 'Raspored poslova', 'Job Title': 'Radno mjesto', 'Job Title added': 'Radno mjesto dodano', 'Job Title Catalog': 'Katalog radnih mjesta', 'Job Title deleted': 'Radno mjesto obrisano', 'Job Title Details': 'Detalji radnog mjesta', 'Job Title updated': 'radnog mjesto ažurirano', 'Job updated': 'Posao ažuriran', 'Jobs': 'Poslovi', 'joining': 'spajanje', 'Joint National Action Plan for Disaster Risk Management and Climate Change Adaptation. 
Applicable to Cook Islands only': 'Udruženi državni akcioni plan za upravljanje rizicima u slučaju katastrofe i prilagođenju na klimatske promjene. Primjenjivo samo na Kukova ostrva', 'Journal': 'Dnevnik', 'Journal entry added': 'Unos u dnevnik dodan', 'Journal entry deleted': 'Unos u dnevnik obrisan', 'Journal Entry Details': 'Detalji stavke žurnala', 'Journal entry updated': 'Unos u dnevnik ažuriran', 'JS Layer': 'JS sloj', 'Just Once': 'Samo jednom', 'Kazakhstan': 'Kazahstan', 'Keep All': 'Zadrži sve', 'Keep Duplicate': 'Sačuvaj duplikat', 'Keep Local': 'Zadrži lokalne', 'Keep Original': 'Sačuvaj original', 'keeps track of all incoming tickets allowing them to be categorised & routed to the appropriate place for actioning.': 'prati sve dolazne kartice dopuštajući im da se kategoriziraju i preusmjere na odgovarajuća mjesto za dalju akciju.', 'keeps track of all the relief organizations working in the disaster region. It captures not only the places where they are active, but also captures information on the range of projects they are providing in each area.': 'prati sve humanitarne organizaije koje djeluju u regionu katastrove. Snima ne samo mjesta gdje su aktivne ,već također snima informacije o opsegu projekata koje one provode u svakom području.', 'Kenya': 'Kenija', 'Key': 'Ključ', 'Key added': 'Ključ dodan', 'Key deleted': 'Obrisan Ključ', 'Key Details': 'Ključni detalji', 'Key updated': 'Kljuc ažuriran', 'Key Value pairs': 'Parovi ključ-vrijednost', 'Keys': 'Ključevi', 'Keyword': 'Ključna riječ', 'Keyword Added': 'Ključna riječ dodana', 'Keyword Deleted': 'Ključna riječ obrisana', 'Keyword Updated': 'Ključna riječ ažurirana', 'Keywords': 'Ključne riječi', 'kit': 'komplet', 'Kit': 'Komplet', 'Kit added': 'Komplet je dodan', 'Kit canceled': 'Komplet otkazan', 'Kit Contents': 'Sadržaj kompleta', 'Kit Created': 'Komplet kreiran', 'Kit deleted': 'Komplet obrisan', 'Kit Details': 'Detalji kompleta', 'Kit Item': 'Stavka kompleta', 'Kit Items': 'Stavke kompleta', 'Kit Updated': 'Komplet je ažuriran', 'Kit updated': 'Komplet je ažuriran', 'Kit?': 'Komplet?', 'Kits': 'Kompleti', 'Kitting': 'Pakovanje', 'KML Layer': 'KML sloj', 'Known Identities': 'Poznate ličnosti', 'Known incidents of violence against women/girls': 'Poznati incidenti nasilja nad ženama/djevojkama', 'Known incidents of violence since disaster': 'Poznati slučajevi nasilja od katastrofe', 'Known Locations': 'Poznate lokacije', 'Korea, North': 'Sjeverna Koreja', 'Korea, South': 'Južna Koreja', 'Korean': 'Korejski', 'KPIs': 'KPI', 'Kuwait': 'Kuvajt', 'Kyrgyzstan': 'Kirgistan', 'Label': 'Oznaka', 'Lack of material': 'Nedostatak materijala', 'Lack of school uniform': 'Nedostatak školske uniforme', 'Lack of supplies at school': 'Nedostatak zaliha u školi', 'Lack of transport to school': 'Nedostatak prevoza ka školi', 'Lactating women': 'Dojilje', 'Ladder Vehicle 30': 'Platformsko vozilo 30', 'Landslide': 'Klizište', 'Language': 'Jezik', 'Language Code': 'Šifra jezika', 'large': 'širok', 'Last': 'Zadnje', 'Last Checked': 'Zadnja provjera', 'Last Contacted': 'Zadnji kontakt', 'Last Downloaded': 'Zadnje preuzimanje', 'Last known location': 'Posljednja poznata lokacija', "Last Month's Work": 'Rad u zadnjem mjesecu', 'Last Name': 'Prezime', 'Last name': 'Prezime', 'Last Polled': 'Zadnje pregledanje', 'Last Pull': 'Zadnje povlačenje', 'Last pull on': 'Povučeno zadnji put', 'Last Push': 'Zadnje guranje', 'Last push on': 'Gurnuto zadnji put', 'Last run': 'Posljednje pokretanje', 'Last status': 'Zadnji status', 'Last synchronization on': 
'Sinhronizovano zadnji put', 'Last synchronization time': 'Vrijeme posljednje sinhronizacije', 'Last updated': 'Zadnji put ažurirano', 'Last updated by': 'Zadnji put ažurirao', 'Last updated on': 'Zadnji put ažurirano', "Last Week's Work": 'Zadnja radna sedmica', 'Latest Information': 'Posljednja informacija', 'Latitude': 'Geografska širina', 'Latitude & Longitude': '(geografska) Dužina i Širina', 'Latitude and Longitude are required': 'Potrebne geografska širina i dužina', 'Latitude is Invalid!': 'Geografska širina nije ispravna!', 'Latitude is North - South (Up-Down).': 'Geografska širina je sjever-jug (gore-golje)', 'Latitude is North-South (Up-Down).': 'Geografska širina je sjever-jug(gore-dolje)', 'Latitude is North-South (Up-Down). Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere.': 'Geografska širina se mjeri od sjevera ka jugu (gore-dolje). Geografska širina je nula na ekvadoru, pozitivna na sjevernoj hemisferi i negativna na južnoj.', 'Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere.': 'Geografska širina je nula na ekvadoru, pozitivna na sjevernoj hemisferi i negativna na južnoj.', 'Latitude must be between -90 and 90.': 'Geografska širina mora biti između -90 i 90', 'Latitude of far northern end of the region of interest.': 'Geografska širina krajnjeg sjevernog kraja regije', 'Latitude of far southern end of the region of interest.': 'Geografska širina je daleko od južnog kraja interesnog područja', 'Latitude of Map Center': 'Geografska širina od centra karte', 'Latitude should be between': 'Geografska širina treba da bude između', 'latrines': 'zahodi', 'Latrines': 'Zahodi', 'Latvia': 'Latvija', 'Law enforcement, military, homeland and local/private security': 'Organi provođenja zakona, vojska, državna i lokalna/privatna sigurnost', 'Layer': 'Sloj', 'Layer added': 'Dodan sloj', 'Layer deleted': 'Obrisan sloj', 'Layer Details': 'Detalji sloja', 'Layer has been Disabled': 'Sloj je onemogućen', 'Layer has been Enabled': 'Sloj je omogućen', 'Layer ID': 'ID Nivoa', 'Layer Name': 'Ime sloja', 'Layer Properties': 'Svojstva sloja', 'Layer removed from Symbology': 'Sloj uklonjen iz značenja simbola', 'Layer Type': 'Tip sloja', 'Layer updated': 'Ažuriran sloj', 'Layers': 'Slojevi', 'Layers updated': 'Slojevi ažurirani', 'Layout': 'Raspored', 'Lead Implementer': 'Vodeći realizator', 'Lead Implementer for this project is already set, please choose another role.': 'Glavni implementator za ovaj projekt je već postavljen, molim izaberite drugu ulofz.', 'Lead Organization': 'Vodeća organizacija', 'Leader': 'Vođa', 'Leave blank to request an unskilled person': 'Ostavi prazno za zahtjev za nekvalificiranom osobom', 'leave empty to detach account': 'ostavite prazno da odvojite račun', 'Lebanon': 'Liban', 'left': 'lijevo', 'Left-side is fully transparent (0), right-side is opaque (1.0).': 'Lijeva strana je potpuno prozirno (0), desna strana je neprovidno (1.0).', 'Left-to-Right': 'Sa lijeva na desno', 'Legend': 'Legenda', 'Legend Format': 'Format legende', 'legend URL': 'opis URL', 'Legend URL': 'URL legende', 'Length': 'Dužina', 'Length (m)': 'Dužina (m)', 'Lesotho': 'Lesoto', 'less': 'manje', 'Less Options': 'Manje opcija', 'Level': 'Nivo', 'Level 1': 'Nivo 1', 'Level 1 Assessment added': 'Procjena razine 1 dodana', 'Level 1 Assessment deleted': 'Izbrisana procjena nivoa 1', 'Level 1 Assessment Details': 'Detalji nivoa 1 procjene', 'Level 1 Assessment updated': 'Procjena 
prvog stepena ažurirana', 'Level 1 Assessments': 'procjena nivoa1', 'Level 2': 'Nivo 2', 'Level 2 Assessment added': 'Dodat nivo procjene 2.', 'Level 2 Assessment deleted': 'Procjena Nivoa 2 obrisana', 'Level 2 Assessment Details': 'Detalji procjene nivoa 2', 'Level 2 Assessment updated': 'Procjena drugog nivoa ažurirana', 'Level 2 Assessments': 'Procjena nivoa 2', 'Level 2 or detailed engineering evaluation recommended': 'Preporučuje se nivo 2 ili procjena izvedbenog projekta', 'Level 3': 'Nivo 3', "Level is higher than parent's": 'Nivo je veći nego kod roditelja', 'Level of Award': 'Nivo nagrade', 'Level of competency this person has with this skill.': 'Nivo sposobnosti koju ova osoba ima s tom vještinom.', 'Liberia': 'Liberija', 'Library support not available for OpenID': 'Podrška za biblioteku nije dostupna za OpenID', 'Libya': 'Libija', 'LICENCE': 'DOZVOLA', 'LICENSE': 'DOZVOLA', 'License Number': 'Broj dozvole', 'License Plate': 'Registarske tablice', 'Liechtenstein': 'Lihtenštajn', 'light': 'lagane', 'Lighting': 'Osvjetljenje', 'Line': 'Linija', 'LineString': 'Žica', 'Link': 'Veza', 'Link (or refresh link) between User, Person & HR Record': 'Veza (ili osvježena veza) između korisnika, osobe i zapisa o ljudskim resursima', 'Link an Item & Shipment': 'Poveži predmet i pošiljku', 'Link for the RSS Feed.': 'Veza na RSS dovod', 'Link Item & Shipment': 'Poveži predmet i pošiljku', 'Link to this result': 'Veza na ovaj link', 'Links': 'Veze', 'Lips, Shape': 'Usne, oblik', 'List': 'Spisak', 'List %(site_label)s Status': 'Prikaži %(site_label)s status', 'List / Add Baseline Types': 'Popis / Dodaj vrste referentnih vrijednosti', 'List / Add Impact Types': 'Izlistaj/Dodaj tipove utjecaja', 'List / Add Services': 'Izlistaj / Dodaj usluge', 'List / Add Types': 'Izlistaj / Dodaj Tipove', 'List Activities': 'Prikaži aktivnosti', 'List Activity Organizations': 'Prikaži organizacije aktivnosti', 'List Activity Reports': 'Prikaži izvještaje o aktivnostima', 'List Activity Types': 'Prikaži tipove aktivnosti', 'List Addresses': 'Prikaži adrese', 'List Affiliations': 'Prikaži namještenja', 'List Airports': 'Prikaži aerodrome', 'List all': 'Prikaži sve', 'List All': 'Prikaži sve', 'List All Assets': 'Navedite sva sredstva', 'List All Catalog Items': 'Lista svih stavki kataloga', 'List All Catalogs & Add Items to Catalogs': 'Prikaži sve kataloge i dodaj stavke u kataloge', 'List All Commitments': 'Prikaži sva zaduženja', 'List all Entries': 'Prikaži sve unose', 'List All Entries': 'Prikaži sve unose', 'List All Group Memberships': 'Prikaži svo članstvo grupa', 'List All Item Categories': 'Prikaži kategorije stavki', 'List All Memberships': 'Izlistaj sva članstva', 'List All Organization Approvers & Whitelists': 'Prikaži sve potvrđivače u organizaciji i bijele liste', 'List All Received Shipments': 'Izlistaj sve primljene pošiljke', 'List All Records': 'Izlistaj sve zapise', 'List All Reports': 'Prikaži sve izvještaje', 'List All Requested Items': 'Prikaži sve zahtijevane stavke', 'List All Requested Skills': 'Prikaži sve tražene vještinee', 'List All Requests': 'Prikaži sve zahtjeve', 'List All Roles': 'Prikaži sve uloge', 'List All Sent Shipments': 'Izlistaj sve poslane pošiljke', 'List All Users': 'Prikaži sve korisnike', 'List All Vehicles': 'Prikaži sva vozila', 'List Alternative Items': 'Prikaži alternativne stavke', 'List Annual Budgets': 'Prikaži godišnje budžete', 'List Assessment Answers': 'Prikaži odgovore ocjene', 'List Assessment Questions': 'Prikaži pitanja ocjene', 'List Assessment 
Summaries': 'Izlistaj sažetke procjena', 'List Assessment Templates': 'Prikaži predloške ocjene', 'List Assessments': 'Popis procjenea', 'List Assets': 'Prikaži sredstva', 'List Assigned Human Resources': 'Prikaži dodijeljene ljudske resurse', 'List Availability': 'Pregled dostupnih', 'List available Scenarios': 'Izlistaj dostupne scenarije', 'List Awards': 'Prikaži nagrade', 'List Base Stations': 'Prikaži bazne stanice', 'List Baseline Types': 'Lista tipova referentnih tačaka', 'List Baselines': 'Prikaži referentne tačke', 'List Beneficiaries': 'Prikaži korisnike', 'List Beneficiary Types': 'Prikaži tipove korisnika', 'List Body Finds': 'Prikaži nađena tijela', 'List Branch Organizations': 'Prikaži ogranke organizacije', 'List Brands': 'Prikaži proizvođačke marke', 'List Budgets': 'Izlistaj budžete', 'List Bundles': 'Ispiši pakete', 'List Camp Services': 'Prikaži usluge kampa', 'List Camp Statuses': 'Prikaži statuse kampa', 'List Camp Types': 'Prikaži tipove kampava', 'List Campaign Messages': 'Prikaži poruke kampanje', 'List Campaigns': 'Prikaži kampanje', 'List Camps': 'Prikaži kampove', 'List Cases': 'Prikaži slučajeve', 'List Catalog Items': 'Prikaži stavke kataloga', 'List Catalogs': 'Prikaži kataloge', 'List Category<>Sub-Category<>Catalog Relation': 'Prikaz Kategorija<>Podkategorija<>Kataloški odnos', 'List Certificates': 'Prikaži certifikate', 'List Certifications': 'Prikaži certifikacije', 'List Checklists': 'Prikaži liste zadataka', 'List Cluster Subsectors': 'Izlistaj podsektore skupa', 'List Clusters': 'Prikaži grupisanja', 'List Coalitions': 'Prikaži koalicije', 'List Commitment Items': 'Prikaži stavke zaduženja', 'List Commitments': 'Prikaži zaduženja', 'List Committed People': 'Prikaži zadužene ljude', 'List Communities': 'Prikaži zajednice', 'List Community Contacts': 'Prikaži kontakt podatke zajednice', 'List Competencies': 'Popis Kompetencija', 'List Competency Ratings': 'Prikaži ocjene sposobnosti', 'List Completed Assessment Forms': 'Prikaži završene formulare ocjene', 'List Configs': 'Prikaži konfiguracije', 'List Conflicts': 'lista sukoba', 'List Contact Information': 'Prikaži kontaktne informacije', 'List Contacts': 'Prikaži kontakte', 'List Course Certicates': 'Ispiši certifikovane kurseve', 'List Course Certificates': 'Prikaži certifikate kursa', 'List Courses': 'Prikaži kurseve', 'List Credentials': 'Prikaži akreditive', 'List Current': 'Prikaži trenutne', 'List Data in Theme Layer': 'Prikaži podatke iz tematskog sloja', 'List Departments': 'Prikaži odjeljenja', 'List Details': 'Prikaži detalje', 'List Disaster Assessments': 'Prikaži procjene katastrofe', 'List Distribution Items': 'Prikaži stavke raspodjele', 'List Distributions': 'Prikaži raspodjele', 'List Documents': 'Prikaži dokumente', 'List Donations': 'Prikaži donacije', 'List Donors': 'Prikaži donatore', 'List Education Details': 'Prikaži podatke o obrazovanju', 'List Education Levels': 'Prikaži nivoe obrazovanja', 'List Event Types': 'Prikaži tipove događaja', 'List Events': 'Prikaži događaje', 'List Facilities': 'Prikaži objekte', 'List Facility Types': 'Prikaži vrstw objekata', 'List Feature Classes': 'Izlistaj klase karakteristika', 'List Feature Groups': 'Prikaži grupe karakteristika', 'List Feature Layers': 'Prikaži slojeve karakteristika', 'List Finds': 'Lista pronalaženja', 'List Flood Reports': 'Izlistaj izvještaje o poplavama', 'List Found People': 'Lista nađenih ljudi', 'List GPS data': 'Prikaži GPS podatke', 'List Groups': 'Prikaži grupe', 'List Groups/View Members': 'Izlistaj Grupe/Pogledaj 
Članove', 'List Hazards': 'Prikaži rizike', 'List Heliports': 'Prikaži heliodrome', 'List Homes': 'Izlistaj domove', 'List Hospitals': 'Prikaži bolnice', 'List Hours': 'Prikaži sate', 'List Human Resources': 'Prikaži ljudske resurse', 'List Identities': 'Prikaži identitete', 'List Images': 'Prikaži slike', 'List Impact Assessments': 'Izlistaj procjene utjecaja', 'List Impact Types': 'Popis vrsta utjecaja', 'List Impacts': 'Nabroji utjecaje', 'List Import Files': 'Ispiši uvezene datoteke', 'List Incident Reports': 'Prikaži izvještaje o incidentu', 'List Incident Types': 'Prikaži tipove incidenta', 'List Incidents': 'Prikaži incidente', 'List Item Catalog Categories': 'Prikaži kategorije stavki kataloga', 'List Item Catalogs': 'Prikaži stavki...', 'List Item Categories': 'Prikaži kategorije stavki', 'List Item Packets': 'Prikaz paketa stavki', 'List Item Packs': 'Prikaži pakete stavki', 'List Item Sub-Categories': 'Prikaži podkategorije stavki', 'List Items': 'Prikaži stavke', 'List Items in Inventory': 'Ispiši stavke u inventaru', 'List Items in Request': 'Prikaži stavke u zahtjevu', 'List Items in Stock': 'Prikaži stavku u zalihi', 'List Job Roles': 'Izlistaj poslovne uloge', 'List Job Titles': 'Prikaži radna mjesta', 'List Jobs': 'Prikaži poslove', 'List Keys': 'Lista ključeva', 'List Keywords': 'Prikaži ključne riječi', 'List Kits': 'Prikaži komplete', 'List Layers': 'Prikaži slojeve', 'List Layers in Profile': 'Prikaži slojeve u profilu', 'List Layers in Symbology': 'Prikaži značenja simbola', 'List Level 1 assessments': 'Izlistaj procjene 1. Nivoa', 'List Level 1 Assessments': 'Prikaži procjene nivoa 1', 'List Level 2 Assessments': 'Ispiši procjene drugog nivoa', 'List Level 2 assessments': 'Ispiši procjene nivoa 2', 'List Location Hierarchies': 'Prikaži hijerarhije lokacija', 'List Locations': 'Prikaži lokacije', 'List Log Entries': 'Prikaži unose zapisnika', 'List Logged Time': 'Prikaži bilježena vremena', 'List Mailing Lists': 'Prikaži liste za slanje poruka', 'List Map Profiles': 'Prikaži konfiguracije mape', 'List Markers': 'Prikaži markere', 'List Members': 'Prikaži članove', 'List Memberships': 'Prikaži članstva', 'List Messages': 'Prikaži poruke', 'List Milestones': 'Prikaži prekretnice', 'List Missing People': 'Lista nestalih ljudi', 'List Missing Persons': 'Prikaži nedostajuće osobe', 'List Missions': 'Prikaži misije', 'List Morgues': 'Kreiraj mrtvačnice', 'List Need Types': 'Prikaži vrste potreba', 'List Needs': 'Lista potreba', 'List Networks': 'Prikaži mreže', 'List of addresses': 'Lista adresa', 'List of Appraisals': 'Prikaži ispunjenja', 'List of CSV files': 'Lista CSV datoteka', 'List of CSV files uploaded': 'Spisak ucitanih CSV(comma separated value) datoteka', 'List of Facilities': 'Prikaži objekte', 'List of Items': 'Potpis predmeta', 'List of Missing Persons': 'Lista osoba koje su nestale', 'List of Peers': 'Lista saradnika', 'List of Professional Experience': 'Prikaži profesionalna iskustva', 'List of Reports': 'Lista Izvještaja', 'List of Requests': 'Lista zahtjeva', 'List of Roles': 'Prikaži uloge', 'List of Spreadsheets': 'Lista proračunskih tablica', 'List of Spreadsheets uploaded': 'Lista poslanih tablica', 'List of Volunteers': 'Lista volontera', 'List of Volunteers for this skill set': 'Lista volontera za ovu skupinu vještina', 'List of Volunteers for this skills set': 'Lista volontera za ovu skupinu vještina', 'List Office Types': 'Prikaži tipove kancelarija', 'List Offices': 'Prikaži kancelarije', 'List Orders': 'Prikaži narudžbe', 'List Organisations': 
'Prikaži organizacije', 'List Organization Domains': 'Prikaži domene organizacije', 'List Organization Types': 'Prikaži tipove organizacije', 'List Organizations': 'Prikaži organizacije', 'List Outputs': 'Prikaži izlaze', 'List Participants': 'Prikaži učesnike', 'List Partner Organizations': 'Prikaži partnerske organizacije', 'List Partners': 'Lista partnera', 'List Patients': 'Lista pacijenata', 'List Peers': 'Popis saradnika', 'List Personal Effects': 'Kreiraj lične uticaje', 'List Persons': 'Prikaži osobe', "List Persons' Details": 'Prikaži detalje o osobama', 'List Photos': 'Prikaži fotografije', 'List PoI Types': 'Prikaži tipove tačaka interesa', 'List Points of Interest': 'Prikaži tačke interesa', 'List Policies & Strategies': 'Prikaži politike i strategije', 'List Population Statistics': 'Izlistaj demografsku statistiku', 'List Positions': 'Navedi Pozicije', 'List Posts': 'Prikaži blokove ugradivog teksta', 'List Problems': 'Lista problema', 'List Profiles configured for this Layer': 'Prikaži profile konfigurisane za ovaj sloj', 'List Programs': 'Prikaži programe', 'List Project Organizations': 'Prikaži organizacije projekta', 'List Projections': 'Prikaži projekcije', 'List Projects': 'Prikaži projekte', 'List Question Meta-Data': 'Prikaži metapodatke pitanja', 'List Rapid Assessments': 'Izlistaj brze procjene', 'List Received Items': 'Izlistaj primljene predmete', 'List Received Shipments': 'Prikaži primljene isporuke', 'List Received/Incoming Shipments': 'Prikaži primljene/dolazne pošiljke', 'List Records': 'Prikaži zapise', 'List Recurring Requests': 'Prikaži ponavljajuće zahtjeve', 'List Regions': 'Prikaži područja', 'List Registrations': 'Prikaži registracije', 'List Relatives': 'Izlistaj rodbinu', 'List Reports': 'Prikaži izvještaje', 'List Repositories': 'Prikaži repozitorije', 'List Request Items': 'Lista traženih predmeta', 'List Request Templates': 'Prikaži predloške zahtjeva', 'List Requested Skills': 'Prikaži tražene vještine', 'List Requests': 'Prikaži zahtjeve', 'List Resources': 'Prikaži resurse', 'List Response Summaries': 'Prikaži sumarne odgovore', 'List Responses': 'Prikaz odgovora', 'List Rivers': 'Lista rijeka', 'List Roles': 'Prikaži uloge', 'List Rooms': 'Prikaži sobe', 'List saved searches': 'Prikaži sačuvane pretrage', 'List Scenarios': 'Lista scenarija', 'List School Districts': 'Prikaz rejona škole', 'List School Reports': 'Prikaz izvještaja o školama', 'List Seaports': 'Prikaži luke', 'List Sections': 'Lista sekcija', 'List Sector': 'Prikaz sektora', 'List Sectors': 'Prikaži sektore', 'List Sent Items': 'Prikaži poslane stavke', 'List Sent Shipments': 'Prikaži poslane pošiljke', 'List Series': 'Prikaži serije', 'List Service Profiles': 'Prikaži profile usluga', 'List Services': 'Prikaži usluge', 'List Settings': 'Izlistaj postavke', 'List Shelter Services': 'Prikaži usluge skloništa', 'List Shelter Statuses': 'Prikaži statuse skloništa', 'List Shelter Types': 'Prikaži tipove skloništa', 'List Shelters': 'Prikaži skloništa', 'List Shipment Items': 'Prikaži predmete pošiljke', 'List Shipment/Way Bills': 'Lista Poslanih Pošiljki', 'List Shipment<>Item Relation': 'Prikaži Pošiljka<>Odnos predmeta', 'List Shipments': 'Lista Poslanih Pošiljki', 'List Skill Equivalences': 'Prikaži ekvivalencije vještina', 'List Skill Provisions': 'Izlistaj pružanja vještina', 'List Skill Types': 'Prikaži tipove vještina', 'List Skills': 'Prikaži vještine', 'List Solutions': 'Izlistaj rješenja', 'List Sources': 'Prikaži izvore', 'List Staff': 'Izlistaj osoblje', 'List Staff & 
Volunteers': 'Prikaži osoblje i vlolontere', 'List Staff Assignments': 'Prikaži dodjele osoblja', 'List Staff Members': 'Prikaži članove osoblja', 'List Staff Types': 'Izlistaj tipove osoblja', 'List Status': 'Ispiši status', 'List Status Reports': 'Prikaži statusne izvještaje', 'List Statuses': 'Prikaži statuse', 'List Stock Adjustments': 'Prikaži prilagođenja zaliha', 'List Stock Counts': 'Prikaži količine zaliha', 'List Stock in Warehouse': 'Prikaži zalihe u skladištima', 'List Storage Bins': 'Prikaz korpi za smještaj', 'List Storage Location': 'Navedi lokacije skladišta', 'List Subscriptions': 'Ispiši pretplate', 'List Subsectors': 'Prikaži podsektore', 'List Suppliers': 'Prikaži dobavljače', 'List Support Requests': 'Izlistaj zahtjeve za podršku', 'List Survey Answers': 'Navedi odgovore anketa', 'List Survey Questions': 'Prikaz anketnih pitanja', 'List Survey Sections': 'Izlistaj sekcije ankete', 'List Survey Series': 'Prikaži istraživačke nizove', 'List Survey Templates': 'Izlistaj šablone za ankete', 'List Symbologies': 'Prikaži značenje simbola', 'List Symbologies for Layer': 'Prikaži značenje simbola za sloj', 'List Tagged Posts': 'Prikaži označene dijelove teksta', 'List Tags': 'Prikaži oznake', 'List Tasks': 'Prikaži zadatke', 'List Teams': 'Prikaži timove', 'List Template Sections': 'Prikaži odjeljke predložaka', 'List Themes': 'Prikaži teme', 'List Tickets': 'Prikaži kartice', 'List Tours': 'Prikaži ture', 'List Tracks': 'Izlistaj praćenja', 'List Training Events': 'Prikaži događaje obuke', 'List Trainings': 'Prikaži obuke', 'List unidentified': 'Prikaži neidentifikovano', 'List Units': 'Prikaži jedinice', 'List Users': 'Prikaži korisnike', 'List Vehicle Assignments': 'Prikaži dodjele vozila', 'List Vehicle Details': 'Prikaži detalje o vozilu', 'List Vehicle Types': 'Prikaži tipove vozila', 'List Vehicles': 'Prikaži vozila', 'List Volunteer Cluster Positions': 'Prikaži pozicije skupa volontera', 'List Volunteer Cluster Types': 'Prikaži tipove skupa volontera', 'List Volunteer Clusters': 'Prikaži skupove volontera', 'List Volunteer Roles': 'Prikaži uloge volontera', 'List Volunteers': 'Prikaži volontere', 'List Warehouse Items': 'Prikaži stavke skladišta', 'List Warehouses': 'Prikaži skladišta', 'List/Add': 'Izlistaj/Dodaj', 'Lists "who is doing what & where". Allows relief agencies to coordinate their activities': 'Popis "ko radi šta i gdje". 
Omogućava agencijama za pomoć da koordinišu svoje aktivnosti.', 'liter': 'litar', 'Lithuania': 'Litvanija', 'Live Help': 'Pomoć uživo', 'Livelihood': 'Izdržavanje', 'Livelihoods': 'Izdržavanja', 'LMS Administration': 'LMS administracija', 'Load': 'Učitaj', 'Load Cleaned Data into Database': 'Unesi očišćene podatke u bazu podataka', 'Load Raw File into Grid': 'Učitaj neobrađenu datoteku u mrežu', 'Loaded By': 'Učitao', 'Loading': 'Učitavam', 'Loading Equipment': 'Učitavanje opreme', 'Loading Locations...': 'Učitavam lokacije...', 'Local Currency': 'Lokalna valuta', 'Local Name': 'Lokalni naziv', 'Local Names': 'Lokalna imena', 'Location': 'Lokacija', 'Location (Site)': 'Lokacija (mjesto)', 'Location 1': 'Lokacija 1', 'Location 2': 'Lokacija 2', 'Location Added': 'Lokacija dodana', 'Location added': 'Lokacija dodana', 'Location added to Organization': 'Lokacija dodana organizacioji', 'Location cannot be converted into a group.': 'Lokacija ne može biti pretvorena u grupu.', 'Location deleted': 'Lokacija obrisana', 'Location Deleted': 'Izbrisana lokacija', 'Location Detail': 'Detalji lokacije', 'Location Details': 'Detalji lokacije', 'Location Group': 'Grupa lokacija ', 'Location group cannot be a parent.': 'Lokacijska grupa ne može biti roditelj.', 'Location group cannot have a parent.': 'Grupa lokacija ne može imati roditelja', 'Location groups can be used in the Regions menu.': 'Grupe lokacija se mogu koristiti u meniju regija.', 'Location groups may be used to filter what is shown on the map and in search results to only entities covered by locations in the group.': 'Lokacije grupa mogu biti korištene za filtriranje prikaza na mapi i u pretrazi rezultata samo na entitetima pokrivenim lokacijama grupe.', 'Location Hierarchies': 'Hijerarhije lokacija', 'Location Hierarchy': 'Hijerarhija lokacija', 'Location Hierarchy added': 'Hijerarhija lokacija dodana', 'Location Hierarchy deleted': 'Hijerarhija lokacija obrisana', 'Location Hierarchy Level 0 Name': 'Ime hijerarhijske lokacije nultog nivoa', 'Location Hierarchy Level 1 Name': 'Naziv nivoa 1 u hijerarhiji lokacija', 'Location Hierarchy Level 2 Name': 'Ime hijerarhijske lokacije drugog stepena', 'Location Hierarchy Level 3 Name': 'Ime lokacije 3. 
hijerarhijskog nivoa', 'Location Hierarchy Level 4 Name': 'Ime nivoa 4 u hijerahiji položaja', 'Location Hierarchy Level 5 Name': 'Naziv lokacije hijerarhijskog nivoa 5', 'Location Hierarchy updated': 'Hijerarhija lokacija ažurirana', 'Location is of incorrect level!': 'Lokacija je na neispravnom nivou', 'Location is Required!': 'Zahtijeva se lokacija', 'Location needs to have WKT!': 'Lokacija treba imati WKT!', 'Location removed from Organization': 'Lokacija uklonjena iz organizacije', 'Location Required!': 'Zahtijeva se lokacija!', 'Location updated': 'Lokacija ažurirana', 'Location:': 'Lokacija:', 'Location: ': 'Lokacija: ', 'Locations': 'Lokacije', 'Locations De-duplicator': 'Deduplikator lokacija', 'Locations of this level need to have a parent of level': 'Lokacije ovog nivoa moraju imati roditelja nivoa', 'Locations should be different!': 'Lokacije trebaju biti različite', 'Lockdown': 'Zaključavanje', 'Loctaion of tip': 'Lokacija savjeta', 'Log': 'Zapisnik', 'Log Entry': 'Element zapisnika', 'Log entry added': 'Unos je dodan', 'Log entry deleted': 'Polje za unos izbrisano', 'Log Entry Deleted': 'Stavka zapisnika izbrisana', 'Log Entry Details': 'Detalji stavki zapisnika', 'Log entry updated': 'Unos je ažuriran', 'Log Time Spent': 'Provedeno vrijeme prijave', 'Logged By': 'Evidentirao', 'Logged Time': 'Vrijeme prijave', 'Logged Time Details': 'Detalji vremena prijave', 'Login': 'Prijava', 'login': 'prijava', 'Login using Facebook account': 'Prijava koristeći Facebook nalog', 'Login using Google account': 'Prijava koristeći Google nalog', 'Login with Facebook': 'Prijava na Facebook', 'Login with Google': 'Prijava preko Google', 'Logistics': 'Logistika', 'Logistics Management': 'Upravljane Logistikom', 'Logistics Management System': 'Sistem logističke uprave', 'Logo file %s missing!': 'Nedostaje %s logo datoteka', 'Logo of the organization. This should be a png or jpeg file and it should be no larger than 400x400': 'Logotip organizacije. To treba biti png ili jpeg datoteka i ne treba biti veći od 400x400', 'Logout': 'Odjavi se', 'long': 'dugi', 'Long Name': 'Dugo ime', 'Long Text': 'Dug Tekst', 'Long-term care': 'Dugoročna briga', 'long>12cm': 'dugo>12cm', 'Longitude': 'Geografska dužina', 'Longitude is Invalid!': 'Geografska dužina je neispravna', 'Longitude is West - East (sideways).': 'Geografska dužina je Zapad - Istok (horizontalno)', 'Longitude is West - East (sideways). Latitude is North-South (Up-Down). Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere. Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas. These need to be added in Decimal Degrees.': 'Geografska dužina je Zapad - istog (postrance). Geografska širina je nula na ekvatoru i pozitivna je je na sjevernoj polulopti a', 'Longitude is West - East (sideways). Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.': 'Geografska dužina je zapad - istok. Geografska dužina je nula na glavnom meridijanu (Vrijeme po Griniču) i pozitivna je prema istoku, preko Evrope i Azije. 
Geografska dužina je negativna na zapadu, preko Atlantika i Amerika.', 'Longitude is West-East (sideways).': 'Geografska dužina: Zapad-istok (horizontalno)', 'Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.': 'Geografska dužina je nula na glavnom meridijanu (Vrijeme po Griniču) i pozitivna je prema istoku, preko Evrope i Azije. Geografska dužina je negativna na zapadu, preko Atlantika i Amerika.', 'Longitude is zero on the prime meridian (through Greenwich, United Kingdom) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.': 'Geografska dužina je jednaka nuli na prvom meridijanu (kroz Grinvič, Velika Britanija) i pozitivna je prema istoku, preko Evrope i Azije. Geografska dužina je negativna prema zapadu, preko Atlantika i Amerike.', 'Longitude must be between -180 and 180.': 'Geografska dužina mora biti broj između -180 i 180', 'Longitude of far eastern end of the region of interest.': 'Geografska dužina istočnog dijela posmatrane regije', 'Longitude of far western end of the region of interest.': 'Geografska dužina krajnje desne/zapadne tačke regiona o kom je riječ', 'Longitude of Map Center': 'Geografska dužina centra mape', 'Longitude should be between': 'Geografska dužina treba biti između', 'Looting': 'Pljačkanje', 'Lost': 'Izgubljeno', 'Lost Password': 'Izgubljena lozinka', 'low': 'nisko', 'Low': 'Nisko', 'Low Tide Depth': 'Dubina niske plime', 'Luxembourg': 'Luksemburg', 'Macedonia': 'Makedonija', 'Machine with which data was exchanged.': 'Mašina s kojom su podaci razmijenjeni.', 'Madagascar': 'Madagaskar', 'Magnetic Storm': 'Magnetna Oluja', 'Mailing list': 'Dopisna lista', 'Mailing list added': 'Lista elektronske pošte dodana', 'Mailing list deleted': 'Lista elektronske pošte obrisana', 'Mailing List Details': 'Detalji liste za slanje poruka', 'Mailing List Name': 'Ime liste za slanje poruka', 'Mailing list updated': 'Lista elektronske pošte ažurirana', 'Mailing Lists': 'Dopisne liste', 'Main cash source': 'Glavni izvor gotovine', 'Main income sources before disaster': 'Glavni izvori prihoda prije nepogode', 'Main?': 'Glavni?', 'Mainstreaming DRR': 'Opšte prihvatanje smanjenjearizika katastrofe', 'Major': 'Bitan', 'Major Damage': 'Značajna šteta', 'Major expenses': 'Glavni troškovi', 'Major outward damage': 'Velika vanjska šteta', 'Make a request': 'Kreiraj zahtjev', 'Make a Request for Aid': 'Kreiraj zahtjev za pomoć', 'Make Commitment': 'Kreiraj zaduženje', 'Make New Commitment': 'Kreiraj novo zaduženje', 'Make People Request': 'Napravi zahtjev za ljudima', 'Make Pledge': 'Obećati podršku', 'Make preparations per the <instruction>': 'Kreirajti pripreme po oznaci <instruction>', 'Make Request': 'Pošalji Zahtjev', 'Make Supplies Request': 'Napravi zahtjev za zalihama', 'Malawi': 'Malavi', 'Malaysia': 'Malezija', 'Maldives': 'Maldivi', 'male': 'Muško', 'Male': 'Muškarac', 'Malnutrition present prior to disaster': 'Neuhranjenost prisutna prije katastrofe', 'Manage': 'Upravljajte', 'Manage Cache': 'Upravljanje kešom', 'Manage Events': 'Upravljaj Događajima', 'Manage Images': 'Upravljaj slikama', 'Manage Incidents': 'Upravljanje incidentima', 'Manage Item catalog': 'Upravljaj katalogom stavki', 'Manage Kits': 'Upravljanje kompletima', 'Manage Layers in Catalog': 'Upravljanje slojevima u katalogu', 'Manage Relief Item Catalogue': 'Upravljanje katalogom humanitarne robe', 
'Manage requests for supplies, assets, staff or other resources. Matches against Inventories where supplies are requested.': 'Upravljajte zahtjevima za zalihe, sredstvima, osobljem ili zahtjevima za druge resurse. Poklapanja sa inventarima zaliha se zahtijevaju.', 'Manage requests of hospitals for assistance.': 'Upravljanje zahtjevima bolnica za pomoć.', 'Manage Returns': 'Upravljanje povratima', 'Manage Sub-Category': 'Upraljaj potkategorijama', 'Manage Users & Roles': 'Upravljanje korisnicima i ulogama', 'Manage Vehicles': 'Upravljaj vozilima', 'Manage volunteers by capturing their skills, availability and allocation': 'Upravljaj volonterima vodeći računa o njihovim vještinama, dostupnosti i raspodjeli', 'Manage Warehouses/Sites': 'Upravljanje skladištima/položajima', 'Manage Your Facilities': 'Upravljanje vašim objektima', 'Manager': 'Menadžer', 'Managing Office': 'Ured upravljanja', 'Mandatory. In GeoServer, this is the Layer Name. Within the WFS getCapabilities, this is the FeatureType Name part after the colon(:).': 'Obavezno. U GeoServer, ovo je ime sloja. Unutar WFS getCapabilities, ovo je dio sa FeatureType imenom nakon dvotačke(:).', 'Mandatory. The base URL to access the service. e.g. http://host.domain/geoserver/wfs?': 'Obavezno. Bazni URL za pristup servisu, npr. http://host.domain/geoserver/wfs?', 'Mandatory. The base URL to access the service. e.g. http://host.domain/geoserver/wms?': 'Obavezno. Bazni URL za pristup servisu, npr. http://host.domain/geoserver/wms?', 'Mandatory. The URL to access the service.': 'Obavezno. URL za pristup usluzi.', 'manicured': 'manikiran', 'manual': 'ručno', 'Manual': 'Priručnik', 'Manual Synchronization': 'Ručna sinhronizacija', 'Manual synchronization completed.': 'Ručna sinhronizacija završena', 'Manual synchronization scheduled - refresh page to update status.': 'Raspoređena ručna sinhronizacija - osvježite stranicu da ažurirate status', 'Manual synchronization started in the background.': 'Ručna sinhronizacija započeta u pozadini.', 'Many': 'Mnogo', 'Map': 'Karta', 'Map cannot display without prepop data!': 'Mapa se ne može prikazati bez pripremljenih podataka', 'Map Center Latitude': 'Geografska širina središta mape', 'Map Center Longitude': 'Geografska dužina centra mape', 'Map Profile': 'Konfiguracija karte', 'Map Profile added': 'Dodana konfiguracija mape', 'Map Profile deleted': 'Obrisana konfiguracija mape', 'Map Profile Details': 'Detalji o konfiguraciji mape', 'Map Profile removed': 'Konfiguracija Karte izbrisana', 'Map Profile updated': 'Ažurirana konfiguracija mape', 'Map Profiles': 'Konfiguracija Karte', 'Map has been copied and set as Default': 'Mapa je kopirana i postavljena kao podrazumijevana', 'Map has been set as Default': 'Mapa je postavljena kao podrazumijevana', 'Map Height': 'Visina karte', 'Map is already your Default': 'Mapa je već podrazumijevana', 'Map not available: Cannot write projection file - %s': 'Mapa nije dostupna: Ne mogu pisati datoteku projekcije - %s', 'Map not available: No Projection configured': 'Mapa nije dostupna: nema konfigurisane projekcije', 'Map not available: Projection %(projection)s not supported - please add definition to %(path)s': 'Mapa nije dostupna: Projekcija %(projection)s nije podržana - molim dodajte definiciju u %(path)s', 'Map of Base Stations': 'Mapa baznih stanica', 'Map of Communities': 'Mapa zajednica', 'Map of Facilities': 'Mapa Objekata', 'Map of Hospitals': 'Karta bolnica', 'Map of Incident Reports': 'Mapa izvještaja o incitentu', 'Map of Offices': 'Mapa kancelarija', 'Map 
of Projects': 'Mapa projekata', 'Map of Requests': 'Mapa zahtjeva', 'Map of Resources': 'Mapa resursa', 'Map of Vehicles': 'Mapa vozila', 'Map of Warehouses': 'Mapa skladišta', 'Map Service Catalogue': 'Katalog usluga mape', 'Map Settings': 'Postavke karte', 'Map Viewing Client': 'Klijent za pregled mapa', 'Map Width': 'Širina mape', 'Map Zoom': 'Uvećanje mape', 'Mapa': 'Mapa', 'MapMaker Hybrid Layer': 'MapMaker hibridni sloj', 'MapMaker Layer': 'Sloj MapMaker', 'Mapping': 'Mapiranje', 'Maps': 'Mape', 'Marine Security': 'Pomorska sigurnost', 'Marital Status': 'Bračno stanje', 'Mark as duplicate': 'Označite kao duplo', 'Mark Sender': 'Označi pošiljaoca', 'Marker added': 'Marker dodan', 'Marker deleted': 'Marker obrisan', 'Marker Details': 'Detalji markera', 'Marker Levels': 'Nivoi markera', 'Marker updated': 'Marker ažuriran', 'Markers': 'Markeri', 'married': 'vjenčan', 'Marshall Islands': 'Maršal ostrva', 'Master': 'Glavni', 'Master Message Log': 'Master zapisnik poruka', 'Master Message Log to process incoming reports & requests': 'Glavni zapisnika poruka za obradu ulaznih izvještaja i zahtjeva', 'Match Percentage': 'Postotak poklapanja', 'Match percentage indicates the % match between these two records': 'Odgovarajući postotak ukazuje na % podudaranja između ova dva zapisa', 'Match Requests': 'Uskladi zahtjeve', 'Match?': 'Slaganje?', 'Matching Catalog Items': 'Odgovarajuće Stavke Kataloga', 'Matching Items': 'Uparene stavke', 'Matching Records': 'Odgovarajući zapisi', 'Matching Vehicle Types': 'Usklađeni tipovi vozila', 'Matrix of Choices (Multiple Answers)': 'Matrica izbora (više odgovora)', 'Matrix of Choices (Only one answer)': 'Matrica izbora (samo jedan odgovor)', 'Matrix of Text Fields': 'Matrica tekstualnih polja', 'Mauritania': 'Mauritanija', 'Mauritius': 'Mauricijus', 'Max Height': 'Maksimalna visina', 'Max Persons per Dwelling': 'Maksimalni broj osoba po jedinici smještaja', 'maxExtent': 'maksimalni obim', 'Maximum': 'Maksimum', 'Maximum Extent': 'Maksimalna širina', 'Maximum Location Latitude': 'Maksimalna geografska širina lokacije', 'Maximum Location Longitude': 'Maksimalna geografska dužina lokacije', 'maxResolution': 'maksimalnaRezolucija', 'Measure Area: Click the points around the polygon & end with a double-click': 'Područje mjerenja: Kliknite na tačke oko poligona i završite s dvostrukim klikom', 'Measure Length: Click the points along the path & end with a double-click': 'Dužina mjerenja: Kliknite na tačke oko staze i završite s dvostrukim klikom', 'Measures': 'Mjere', 'Media Manager': 'Menadžer medija', 'Medical and public health': 'Medicina i javno zdravstvo', 'Medical Conditions': 'Medicinski uslovi', 'Medicine': 'Medicina', 'Medium': 'Srednje', 'medium': 'srednji', 'medium<12cm': 'srednje<12cm', 'Megabytes per Month': 'Megabajta po mjesecu', 'Member Organizations': 'Organizacije članice', 'Members': 'Članovi', 'Membership': 'Članstvo', 'Membership added': 'Dodano članstvo', 'Membership deleted': 'Članstvo izbrisano', 'Membership Details': 'Detalji o članstvu', 'Membership updated': 'Ažurirano članstvo', 'Memberships': 'Članstva', 'Mensajería': 'Slanje poruka', 'Mental': 'Mentalno', 'Menu': 'Meni', 'menu item': 'stavka menija', 'Merge': 'Spoji', 'Merge records': 'Spoji zapise', 'Message': 'Poruka', 'message': 'poruka', 'Message added': 'Dodana poruka', 'Message deleted': 'Poruka obrisana', 'Message Details': 'Detalji poruke', 'Message Log': 'Zapisnik poruka', 'Message Source': 'Izvor poruke', 'Message updated': 'Poruka ažurirana', 'Message variable': 'Varijabla 
poruke', 'Message Variable': 'Promjenjiva poruke', 'Messages': 'Poruke', 'Messaging': 'Slanje poruka', 'Messaging Module': 'Modul poruka', 'Messaging settings updated': 'Ažurirana podešenja razmjene poruka', 'Metadata': 'Meta podaci', 'Meteorite': 'Meteor', 'Meteorological (inc. flood)': 'Meteorološki (uklj. poplave)', 'meter': 'metar', 'meter cubed': 'kubni metar', 'meters': 'metara', 'Method used': 'Metode korištene', 'Mexico': 'Meksiko', 'MGRS Layer': 'MGRS sloj', 'Micronutrient malnutrition prior to disaster': 'Neuhranjenost mikroelementima prisutna prije katastrofe', 'middle': 'sredina', 'Middle Name': 'Srednje ime', 'Migrants or ethnic minorities': 'Imigranti ili etničke manjine', 'Mileage': 'Kilometraža', 'Milestone': 'Prekretnica', 'Milestone Added': 'Prekretnica dodana', 'Milestone Deleted': 'Prekrednica izbrisana', 'Milestone Details': 'Detalji prekretnice', 'Milestone Updated': 'Prekretnica ažurirana', 'Milestones': 'Prekretnice', 'Military': 'Vojni', 'Minimum': 'Minimum', 'Minimum Bounding Box': 'Minimalna uokviravajuća kutija', 'Minimum Location Latitude': 'Minimalna geografska širina lokacije', 'Minimum Location Longitude': 'Minimalna geografska dužina lokacije', 'Minimum shift time is 6 hours': 'Minimalno vrijeme do smjene je 6 sati', 'Minor Damage': 'Manja šteta', 'Minor/None': 'Minorno/ništa', 'Minorities participating in coping activities': 'Manjine koje učestvuju u akcijama suočavanja', 'Minute': 'minuta', 'Minutes must be a number between 0 and 60': 'Broj minuta mora biti između 0 i 60', 'Minutes must be a number.': 'Minuta mora biti broj', 'Minutes must be less than 60.': 'Minute bi trebale biti broj manji od 60', 'Minutes per Month': 'Minute po mjesecu', 'Minutes should be a number greater than 0 and less than 60': 'Minute bi trebale biti broj veći od nula i manji od 60', 'Minutes should be greater than 0 and less than 60': 'Minute bi trebale biti broj veći od nula i manji od 60', 'Miscellaneous': 'Razno', 'misshapen': 'deformisano', 'missing': 'nedostaje', 'Missing': 'Nestalo', 'Missing Person': 'Nestala osoba', 'Missing Person Details': 'Detalji o nestaloj osobi', 'Missing Person Registry': 'Registar nestalih osoba', 'Missing Person Reports': 'Izvještaji o nestalim osobama', 'Missing Persons': 'Nestale osobe', 'Missing Persons Registry': 'Registar nestalih osoba', 'Missing Persons Report': 'Izvještaj o nestalim osobama', 'Missing Report': 'Nedostajući izvještaj', 'Missing Senior Citizen': 'Izgubljen stariji građanin', 'Missing Vulnerable Person': 'Ranjiva osoba nestala', 'Mission': 'Misija', 'Mission added': 'Dodana misija', 'Mission deleted': 'Misija izbrisana', 'Mission Details': 'Detalji zadatka', 'Mission Record': 'Zapis misije', 'Mission updated': 'Misija ažurirana', 'Missions': 'Misije', 'mixed': 'izmiješano', 'Mobile': 'Pokretno', 'Mobile Assess.': 'Mobilna procjena.', 'Mobile Basic Assessment': 'Mobilna osnovna procjena', 'Mobile Commons (Inbound)': 'Mobile Commons (Ulazna)', 'Mobile Commons Setting added': 'Uobičajene mobilne postavke dodane', 'Mobile Commons Setting deleted': 'Uobičajene mobilne postavke obrisane', 'Mobile Commons Setting Details': 'Detalji za postavke Mobile Commons', 'Mobile Commons Settings': 'Postavke za Mobile Commons', 'Mobile Commons settings updated': 'Uobičajene mobilne postavke ažurirane', 'Mobile Commons SMS Settings': 'Uobičajene SMS postavke', 'Mobile Phone': 'Mobilni Telefon', 'Mobile Phone #': 'Broj mobitela', 'Mobile Phone Number': 'Broj mobilnog telefona', 'Mode': 'Način rada', 'Model/Type': 'Model/Tip', 'Modem Settings': 
'Postavke modema', 'Modem settings updated': 'Uobičajene mobilne postavke ažurirane', 'Moderate': 'Umjereno', 'Modify Feature: Select the feature you wish to deform & then Drag one of the dots to deform the feature in your chosen manner': 'Izmjena značajke: Odaberite karakteristiku koju želite deformisati i prevucite tačke da deformišete karakteristiku na izabran način', 'Modify Information on groups and individuals': 'Modifikuj informacije o grupama i pojedincima', 'Modifying data in spreadsheet before importing it to the database': 'Modificiranje podataka u tabeli prije njihovog importovanja u bazu.', 'Module': 'Modul', 'Module Administration': 'Administracija modula', 'module allows the site administrator to configure various options.': 'modul omogućava administratoru stranice da prilagodi razne opcije.', 'Module disabled!': 'Modul je isključen.', 'module helps monitoring the status of hospitals.': 'Modul pomaže nadgledanju statusa bolnica', 'module provides a mechanism to collaboratively provide an overview of the developing disaster, using online mapping (GIS).': "modul pruža mehanizam da se zajednički omogući pregled katastrofe u toku, koristeći 'online' mapiranje (Geografski Informacijski Sistem (GIS) )", 'module provides a mechanism to collaboratively provide an overview of the developing disaster, using online mapping (GIS). You can add markers and pictures to pin point incidents on the map.': "modul pruža mehanizam da se zajednički omogući pregled katastrofe u toku, koristeći 'online' mapiranje (Geografski Informacijski Sistem (GIS) ). Možete dodati markere i slike da pokažete incidente na mapi.", 'Module provides access to information on current Flood Levels.': 'Modul omogućava pristup informacijama o trenutnim nivoima poplave.', 'Module-wise Percentage of Translated Strings': 'Procenat prevedenosti stringova po modulu', 'Moldova': 'Moldavija', 'Monaco': 'Monako', 'Monday': 'Ponedjeljak', 'Monetization': 'Novčana vrijednost', 'Monetization Details': 'Detalji novčane vrijednosti', 'Monetization Report': 'Izvještaj vrijednosti', 'Mongolia': 'Mongolija', 'mongoloid': 'mongoloid', 'Montenegro': 'Crna Gora', 'Month': 'Mjesec', 'Monthly': 'Mjesečno', 'Monthly Cost': 'Mjesečni troškovi', 'Monthly Salary': 'Mjesečna primanja', 'Months': 'Mjeseci', 'more': 'više', 'More Info': 'Više Informacija', 'More Options': 'Više opcija', 'more...': 'više...', 'Morgue': 'Mrtvačnica', 'Morgue added': 'Mrtvačnica dodana', 'Morgue deleted': 'Mrtvačnica obrisana', 'Morgue Details': 'Detalji o mrtvačnici', 'Morgue Status': 'Status Mrtvačnice', 'Morgue Units Available': 'Mrtvačnice na raspolaganju', 'Morgue updated': 'Mrtvačnica ažurirana', 'Morgues': 'Mrtvačnice', 'Morocco': 'Maroko', 'Mosque': 'Džamija', 'Motorcycle': 'Motocikl', 'Moustache': 'Brkovi', 'Mouth, Size': 'Usta, veličina', 'Move Feature: Drag feature to desired location': 'Premještanje karakteristike: Prevucite karakteristiku na željenu lokaciju.', 'Movements (Filter In/Out/Lost)': 'Kretanja (Filter U/Van/Izgubljeno)', 'Mozambique': 'Mozambik', 'Multi-Option': 'Više opcija', 'Multiple': 'Višestruko', 'Multiple Choice (Multiple Answers)': 'Višestruki izbor (više odgovora)', 'Multiple Choice (Only One Answer)': 'Višestruki izbor (samo jedan odgovor)', 'Multiple Matches': 'Višestruko poklapanje', 'Multiple Text Fields': 'Višestruka tekstualna polja', 'Multiplicator': 'Multiplikator', 'MultiPolygon': 'VišePoligonski', 'Muslim': 'Musliman', 'Must a location have a parent location?': 'Mora li lokacija imati lokaciju roditeljsku lokaciju?', 'My 
Bookmarks': 'Moje zabilješke', 'My Current function': 'Moja Trenutna funkcija', 'My Details': 'Moji detalji', 'My Logged Hours': 'Moji evidentirani sati', 'My Maps': 'Moje mape', 'My Open Tasks': 'Moji otvoreni zadaci', 'My Profile': 'Moj profil', 'My Tasks': 'Moji zadaci', 'My Volunteering': 'Moje volontiranje', 'Myanmar': 'Mjanmar', 'Módulo de Tickets': 'Modul s karticama', 'n/a': 'nije dostupno', 'N/A': 'N/D', 'Nagorno-Karabakh': 'Nagorno-Karabah', 'Name': 'Naziv', 'Name and/or ID': 'Ime i/ili broj LK', 'Name and/or ID Label': 'Ime i/ili ID oznaka', 'Name field is required!': 'Polje s imenom je obavezno!', 'Name for your Twilio Account.': 'Ime za vaš Twilio nalog.', 'Name of a programme or another project which this project is implemented as part of': 'Ime programa ili drugog projekta čiji je ovaj projekt dio', 'Name of Award': 'Ime nagrade', 'Name of Driver': 'Ime vozača', 'Name of Father': 'Ime oca', 'Name of Institute': 'Ime institucije', 'Name of Map': 'Ime mape', 'Name of Mother': 'Ime majke', 'Name of Storage Bin Type.': 'Ime korpe za smještaj', 'Name of the file (& optional sub-path) located in static which should be used for the background of the header.': 'Naziv datoteke (i opcionalno putanja) koja će biti korištena kao pozadina zaglavlja.', 'Name of the file (& optional sub-path) located in static which should be used for the top-left image.': 'Ime datoteke (i opcionalna podstaza ) lociranog u statičnom mjestu , koji bi trebao biti korišten za lijevu gornju sliku.', 'Name of the file (& optional sub-path) located in views which should be used for footer.': 'Ime datoteke (i eventualna podstaza) smještena u pogledima koja se može koristiti za zaglavlje na dnu stranice', 'Name of the person in local language and script (optional).': 'Ime osobe na maternjem jeziku', 'Name of the repository (for you own reference)': 'Ime repozitorija (za vašu vlastitu referencu)', 'Name, Org and/or ID': 'Ime, Org i/ili ID', 'Name/Model/Type': 'Ime/Model/Tip', 'Names can be added in multiple languages': 'Imena mogu biti dodana na više jezika', 'Namibia': 'Namibija', 'narrow': 'usko', 'National': 'Nacionalno', 'National ID Card': 'Lična karta', 'National NGO': 'Nacionalna NVO', 'National Staff': 'Nacionalno osoblje', 'Nationality': 'Nacionalnost', 'Nationality of the person.': 'Nacionalnost ili državljanstvo osobe.', 'natural hazard': 'prirodni rizik', 'Nautical Accident': 'Pomorska nesreća', 'Nautical Hijacking': 'Nautičke otmice', 'NDRT (National disaster response teams)': 'NDRT (Nacionalni tim za odgovor u slučaju katastrofa)', 'Neck, Length': 'Vrat, dužina', 'Neck, Peculiarities': 'Vrat, specifičnosti', 'Neck, Shape': 'Vrat, oblik', "Need a 'url' argument!": "Potreban 'url' argument", 'Need added': 'Potreba dodana', 'Need deleted': 'Potreba obrisana', 'Need to be logged-in to be able to submit assessments': 'Potrebno je da budete prijavljeni da biste podnijeli procjenu', 'Need to configure Twitter Authentication': 'Potrebno je izvršiti konfiguraciju Twitter autentičnosti', 'Need to specify a budget!': 'Potrebno je navesti budžet!', 'Need to specify a Budget!': 'Potrebno je navesti budžet!', 'Need to specify a bundle!': 'Neophodno je naznačiti paket!', 'Need to specify a feature group!': 'Morate navesti grupu karakteristika', 'Need to specify a group!': 'Morate specifikovati grupu!', 'Need to specify a kit!': 'Potrebno je odrediti komplet!', 'Need to specify a Kit!': 'Potrebno je odrediti komplet!', 'Need to specify a location to search for.': 'Potrebno specificirati traženu lokaciju.', 'Need
to specify a Resource!': 'Trebate navesti resurs!', 'Need to specify a role!': 'Mora se specificirati uloga!', 'Need to specify a table!': 'Potrebno je navesti tabelu!', 'Need to specify a user!': 'Potrebno je odrediti korisnika!', 'Need Type': 'Vrsta potreba', 'Need Type added': 'Dodan tip potrebe', 'Need Type deleted': 'Tip potreba izbrisan', 'Need Type Details': 'Potrebni detalji o tipu', 'Need Type updated': 'Vrsta potrebe ažurirana', 'Need Types': 'Tipovi potreba', 'Need updated': 'Potreba ažurirana', 'Needs': 'Potrebe', 'Needs Details': 'Detalji potreba', 'Needs elaboration!!!': 'Treba elaborirati!!!', 'Needs Maintenance': 'Potrebno održavanje', 'Needs to reduce vulnerability to violence': 'Potrebno je smanjiti ranjivost prema nasilju', 'Negative Flow Isolation': 'Negativna izolacija protoka', 'negroid': 'negroid', 'Neighborhood': 'Komšiluk', 'Neighbourhood': 'Susjedstvo', 'Neighbouring building hazard': 'Opasnosti od susjednih zgrada', 'Neonatal ICU': 'Intenzivna njega za novorođenčad', 'Neonatology': 'Neonatologija', 'Netherlands': 'Nizozemska', 'Network': 'Mreža', 'Network added': 'Mreža dodana', 'Network Details': 'Detalji mreže', 'Network removed': 'Mreža uklonjena', 'Network updated': 'Mreža ažurirana', 'Networks': 'Mreže', 'Neurology': 'Neurologija', 'Never': 'Nikada', 'never': 'nikad', 'never update': 'nikad ažurirati', 'new': 'Novo', 'New': 'Novi', 'new ACL': 'novi ACL', 'New Activity Type': 'Novi tip aktivnosti', 'New Annual Budget created': 'Kreiran novi godišnji budžet', 'New Assessment reported from': 'Izvještaj o novoj procjeni iz', 'New Body Find': 'Novo traženje tijela', 'New cases in the past 24h': 'Novi slučajevi u posljednjih 24 sata', 'New Certificate': 'Novi certifikat', 'New Checklist': 'Novi spisak', 'New Entry': 'Novi unos', 'New Entry in Asset Log': 'Nova stavka u zapisniku sredstava', 'New Event': 'Novi događaj', 'New Hazard': 'Novi rizik', 'New Home': 'Novi dom', 'New Item Category': 'Nova kategorija predmeta', 'New Job Role': 'Nova radno mjesto', 'New Location': 'Nova lokacija', 'New Location Group': 'Nova grupa lokacija ', 'New Organization': 'Nova organizacija', 'Add Output': 'Novi izlaz', 'New Page': 'Nova strana', 'New Patient': 'Novi pacijent', 'New Peer': 'Novi saradnik', 'New Post': 'Novi ubacivi tekst', 'New Problem': 'Novi problem', 'New Record': 'Novi zapis', 'new record inserted': 'novi zapis unesen', 'New Records': 'Novi zapisi', 'New Relative': 'Novi srodnik', 'New Report': 'Novi izvještaj', 'New Request': 'Novi zahtjev', 'New Role': 'Nova uloga', 'New Scenario': 'Novi scenario', 'New Sector': 'Novi sektor', 'New Service': 'Nova usluga', 'New Skill': 'Nova vještina', 'New Solution Choice': 'Izbor novog rješenja', 'New Staff Member': 'Novi član osoblja', 'New Stock Adjustment': 'Novo prilagođenje zalihe', 'New Stock Count': 'Nova količina zaliha', 'New Support Request': 'Novi zahtjev za podršku', 'New Synchronization Peer': 'Nova sinhronizacijski saradnik', 'New Team': 'Novi tim', 'New Theme': 'Nova tema', 'New Ticket': 'Nova kartica', 'New Training Course': 'Novi kurs obučavanja', 'New updates are available.': 'Nove dostupne nadogradnje.', 'New Volunteer': 'Novi Volonter', 'New Zealand': 'Novi Zeland', 'Newer Timestamp': 'Novija vremenska oznaka', 'News': 'Novosti', 'Next': 'Sljedeće', 'next 100 rows': 'Narednih 100 redova', 'Next run': 'Sljedeće pokretanje', 'Next View': 'Sljedeći prikaz', 'NGO': 'NVO', 'Nicaragua': 'Nikaragva', 'Nigeria': 'Nigerija', 'No': 'Ne', 'NO': 'NE', 'no': 'nema', 'No access at all': 'Nema nikakvog pristupa', 'No 
access to this record!': 'Nema pristupa ovom zapisu!', 'No Accounts currently defined': 'Trenutno nema definisanih računa', 'No action recommended': 'Nema preporučene akcije', 'No Activities currently registered in this event': 'Trenutno nema registrovanih Aktivnosti u ovom događaju', 'No Activities Found': 'Nema pronađenih aktivnosti', 'No Activity Organizations Found': 'Nema nađenih organizacija aktivnosti', 'No Activity Types Found': 'Nema nađenih tipova aktivnosti', 'No Activity Types found for this Activity': 'Nema nađenih tipova aktivnosti za ovu aktivnost', 'No Activity Types found for this Project Location': 'Nema nađenih tipova aktivnosti za ovu lokaciju projekta', 'No Addresses currently registered': 'Trenutno nema registrovanih adresa', 'No Affiliations defined': 'Nema definisanih preduzeća', 'No Aid Requests currently registered': 'Trenutno nema registrovanih zahtjeva za pomoć', 'No Airports currently registered': 'Trenutno nema registrovanih aerodroma', 'No Alternative Items currently registered': 'Nema alternativnih artikala registrovanih', 'No annual budgets found': 'Godišnji budžeti nisu nađeni', 'No Appraisals found': 'Nema nađenih poređenja opcija', 'No Assessment Answers': 'Nema odgovore ocjene', 'No Assessment Questions': 'Nema pitanja ocjene', 'No Assessment Summaries currently registered': 'Nema trenutno registrovanih procjena pregleda', 'No Assessment Templates': 'Nema predložaka ocjene', 'No Assessments currently registered': 'Trenutno nema registrovanih procjena', 'No Asset Assignments currently registered': 'Trenutno nema registrovanih sredstava', 'No Assets currently registered': 'Nema sredstva koja je trenutno registrovano', 'No Assets currently registered in this event': 'Trenutno nema registrovanih sredstava na ovom događaju', 'No Assets currently registered in this incident': 'Trenutno nema sredstava registrovanih u ovom incidentu', 'No Assets currently registered in this scenario': 'Trenutno nema sredstava registrovanih u ovom scenariju', 'No Awards found': 'Nema nađenih nagrada', 'No Base Layer': 'Nema baznog sloja', 'No Base Stations currently registered': 'Nema trenutno registrovanih baznih stanica', 'No Baseline Types currently registered': 'Trenutno nije registriran nijedan tip referentne tačke', 'No Baselines currently registered': 'Nijedna referentnu tačku trenutno registrovana', 'No Beneficiaries Found': 'Nema nađenih korisnika', 'No Beneficiary Types Found': 'Nema nađenih tipova korisnika', 'No Branch Organizations currently registered': 'Nema trenutno registrovanih ogranaka organizacija', 'No Brands currently registered': 'Nema trenutno registrovanih marki', 'No Budgets currently registered': 'Nema prijavljenih budžeta trenutno', 'No Bundles currently registered': 'Nema registrovanih paketa', 'No Camp Services currently registered': 'Trenutno nema registrovanih usluga u kampu', 'No Camp Statuses currently registered': 'Trenutno nema registrovanih statusa kampa', 'No Camp Types currently registered': 'Nije registrovan nikakav tip kampa', 'No Campaign Messages Found': 'Nema nađenih poruka kampanje', 'No Campaigns Found': 'Nema nađenih kampanja', 'No Camps currently registered': 'Nijedan Kamp nije trenutno registrovan', 'No Cases found': 'Nema nađenih slučajeva', 'No Catalog Items currently registered': 'Trenutno nije registrovan katalog sa stavkama', 'No Catalogs currently registered': 'Nema trenutno registrovanih kataloga', 'No Checklist available': 'Nijedna kontrolna lista nije dostupna', 'No Cluster Subsectors currently registered': 'Nijedan 
podsektor skupa trenutačno registrovan', 'No Clusters currently registered': 'Trenutno nema registrovanih skupova', 'No Coalitions currently recorded': 'Nema trenutnio zabilježenih koalicija', 'No Commitment Items currently registered': 'Trenutno nema registriranih stavki zaduženja', 'No Commitments': 'Nema zaduženja', 'No Communities Found': 'Nema nađenih zajednica', 'No Completed Assessment Forms': 'Nema završenih formulara ocjene', 'No Configs currently defined': 'Trenutno nema definisanih konfiguracija', 'No conflicts logged': 'Nisu zabilježeni konflikti', 'No contact information available': 'Nisu dostupne informacije o kontaktu', 'No contact method found': 'Nije pronađena metoda kontakta', 'No Contacts currently registered': 'Nema registriranih kontakata', 'No contacts currently registered': 'Nema registriranih kontakata', 'No Contacts Found': 'Nema nađenih kontakta', 'No contacts yet defined for this site': 'Kontakti još nisu definisani za ovo mjesto', 'No Credentials currently set': 'Nisu postavljeni nijedni akreditivi', 'No data available': 'Nema dostupnih podataka', 'No Data currently defined for this Theme Layer': 'Nema definisanih podataka za ovaj tematski sloj', 'No data in this table - cannot create PDF!': 'Nema podataka u ovoj tabeli - ne može se kreirati PDF!', 'No databases in this application': 'Nema baza podataka u ovom zahtjevu', 'No dead body reports available': 'Nijedan izvještaj o mrtvim tijelima nije dostupan', 'No Details currently registered': 'Trenutno nema registrovanih detalja', 'No Disaster Assessments': 'Nema procjena katastrofe', 'No Distribution Items Found': 'Nisu pronađene stavke raspodjele', 'No Distributions currently registered': 'Trenutno nema registrovanih raspodjela', 'No Distributions Found': 'Nema nađenih raspodjela', 'No Documents currently attached to this request': 'Trenutno nema dokumenata koji su priloženi uz ovaj zahtjev', 'No Documents found': 'Nijedan dokument nije pronađen', 'No Donations': 'Nema donacija', 'No Donors currently registered': 'Trenutno nema registrovanih donatora', 'No education details currently registered': 'Nema trenutno registrovanih nivoa obrazovanja', 'No Education Levels currently registered': 'Nema trenutno registrovanih nivoa obrazovanja', 'No Emails currently in InBox': 'Trenutno nema elektronske pošte u ulaznom sandučetu', 'No Emails currently in Outbox': 'Trenutno nema e-mail poruka u izlaznom sandučetu', 'No Emails currently in Sent': 'Nema trenutno elektronske pošte za slanje', 'No entries currently available': 'Nema trenutno dostupnih unosa', 'No entries found': 'Ništa nije uneseno', 'No entries matching the query': 'Nema stavki vezanih za upit', 'No entry available': 'Nema dostupnog unosa', 'No Event Types currently registered': 'Nijedan tip događaja nije trenutno registrovan', 'No Events currently registered': 'Nema registriranih događaja', 'No Facilities currently registered': 'Trenutno nema registrovanih objekata', 'No Facilities currently registered in this event': 'Nema trenutno registriranih objekata za ovaj događaj', 'No Facilities currently registered in this incident': 'U ovom incidentu trenutno nema registrovanih objekata', 'No Facilities currently registered in this scenario': 'U ovom scenariju trenutno nema registrovanih postrojenja', 'No Facility Types currently registered': 'Nijedan tip objekta nije trenutno registrovan', 'No Feature Classes currently defined': 'Nijedna klasa karakteristika nije trenutno definisana.', 'No Feature Groups currently defined': 'Nijedna klasa mogućnosti nije trenutno 
definisana.', 'No Feature Layers currently defined': 'Nema trenutno definisanih slojeva karakteristika', 'No file uploaded.': 'Nema poslane datoteke.', 'No Flood Reports currently registered': 'trenutno nema registriranih izvještaja o poplavi', 'No forms to the corresponding resource have been downloaded yet.': 'Još uvijek nisu preuzeti obrasci za odgovarajuće resurse', 'No further users can be assigned.': 'Daljnji korisnici se ne mogu dodijeliti.', 'No GPS data currently registered': 'Nema GPS podataka trenutno', 'No Group Memberships currently registered': 'Trenutno nema registrovanih članstava u grupi', 'No Groups currently defined': 'Trenutno nema definisanih grupa', 'No Groups currently registered': 'Trenutno nema registrovanih grupa', 'No Hazards currently registered': 'Trenutno nema registrovanih rizika', 'No Hazards found for this Project': 'Nema nađenih rizika za ovaj projekt', 'No Heliports currently registered': 'Trenutno nema registrovanih helikopterskih sletišta', 'No Homes currently registered': 'Nijedan dom nije trenutno registrovan', 'No Hospitals currently registered': 'Nema registriranih bolnica', 'No Human Resources currently assigned to this incident': 'Ljudski resursi nisu dodijeljeni ovom incidentu', 'No Human Resources currently registered in this event': 'Nema ljudskih resursa trenutno registriranih za ovaj događaj', 'No Human Resources currently registered in this scenario': 'Nema ljudskih resursa trenutno registrovanih u ovom scenariju', 'No Identification Report Available': 'Nema dostupnog izvještaja o identifikaciji', 'No Identities currently registered': 'Nema trenutno registriranih identiteta', 'No Image': 'Nema fotografije', 'No Images currently registered': 'Nema trenutno registrovanih slika', 'No Impact Types currently registered': 'Nijedan tip utjecaja nije trenutačno registrovan.', 'No Impacts currently registered': 'Nema trenutno zabilježenih utjecaja', 'No Import Files currently uploaded': 'Trenutno nema dodanih datoteka za uvoz', 'No import jobs': 'Nema poslova uvoza', 'No Incident Reports currently registered': 'Nema trenutno registrovanih izvještaja o incidentima', 'No Incident Reports currently registered for this event': 'Nema trenutno registrovanih izvještaja o incidentu za ovaj događaj', 'No Incident Reports currently registered in this incident': 'Trenutno nema izvještaja o incidentu registrovanih u ovom incidentu', 'No Incident Types currently registered': 'Trenutno nema registrovanih tipova incidenta', 'No Incidents currently registered in this event': 'Trenutno nema registrovanih incidenata na ovom događaju', 'No Incoming Shipments': 'Nema dolazećih pošiljki', 'No Inventories currently have suitable alternative items in stock': 'Nijedan inventar trenutno nema odgovarajuću zamjensku stavku u zalihama', 'No Inventories currently have this item in stock': 'Nijedan inventar trenutno nema ovu stavku u zalihama', 'No Inventory Stores currently registered': 'Trenutno nema registrovanih skladišta inventara', 'No Item Catalog Category currently registered': 'Trenutno nema registrovanih predmetnih kategorija', 'No Item Catalog currently registered': 'Trenutno nema kataloga predmeta', 'No Item Categories currently registered': 'Trenutno nema registrovanih predmetnih kategorija', 'No Item currently registered': 'Nema trenutno registrovanih stavki', 'No Item Packets currently registered': 'Nema trenutno registrovanih paketa stavki', 'No Item Packs currently registered': 'Nema trenutno registrovanih paketa', 'No Item Sub-Category currently registered': 'Trenutno nema
registrovanih predmetnih kategorija', 'No items currently in stock': 'Nema stavki u zalihama', 'No Items currently registered': 'Nema trenutno registrovanih stavki', 'No Items currently registered in this Inventory': 'Trenutno nema registrovanih stavki u inventaru', 'No Items currently requested': 'Nema trenutno traženih stavki', 'No items have been selected for shipping.': 'Nema stavki izabranih za isporuku.', 'No jobs configured': 'Nema podešenih poslova', 'No jobs configured yet': 'Još uvijek nema podešenih poslova', 'No Keys currently defined': 'Nema trenutno definisanih ključeva', 'No Keywords Found': 'Nema nađenih ključnih riječi', 'No Kits': 'Nema kompleta', 'No Kits currently registered': 'Trenutno nema registrovanih kompleta', 'No Layers currently configured in this Profile': 'Nema slojeva konfigurisanih u ovom profilu', 'No Layers currently defined': 'Nema trenutno definisanih slojeva', 'No Layers currently defined in this Symbology': 'Nema definisanih slojeva za ovo značenje simbola', 'No Level 1 Assessments currently registered': 'Nema procjene prvog nivoa koja je trenutno registrovana', 'No Level 2 Assessments currently registered': 'Nema procjene drugog nivoa koja je trenutno registrovana', 'No Location Hierarchies currently defined': 'Trenutno nije definisana hijerarhija lokacija', 'No location information defined!': 'Nema definisanih informacija o lokaciji', 'No location known for this person': 'Ne postoji poznata lokacija za ovu osobu', 'No Locations currently available': 'Nema trenutno dostupnih lokacija', 'No Locations currently registered': 'Nijedna Lokacija trenutno nije registrovana', 'No Locations Found': 'Nema nađenih lokacija', 'No locations found for members of this team': 'Nisu pronađene lokacije za članove ovog tima', 'No Locations found for this Organization': 'Nisu pronađene lokacije za ovu organizaciju', 'No locations registered at this level': 'Nema registrovanih lokacija na ovom nivou', 'No log entries matching the query': 'Nema podudaranja u zapisniku za upit', 'No Mailing List currently established': 'Nema trenutno uspostavljene mailing liste', 'No Map Profiles currently defined': 'Nema trenutno definisane konfiguracije mape', 'No Map Profiles currently registered in this event': 'Trenutno nije registrovana konfiguracija karte u ovom događaju', 'No Map Profiles currently registered in this incident': 'Trenutno nema konfiguracija mapa registrovanih u ovom incidentu', 'No Map Profiles currently registered in this scenario': 'Nijedna konfiguracija karte nije trenutno registrovana u ovom scenariju.', 'No Markers currently available': 'Nema trenutno dostupnih markera', 'No match': 'Nema poklapanja', 'No Match': 'nema podudaranja', 'No Matching Catalog Items': 'Nema odgovarajućih kataloških stavki', 'No Matching Items': 'Nema odgovarajućih stavki', 'No Matching Records': 'Nema niti jedan zapis', 'No matching records found': 'Nisu pronađeni odgovarajući zapisi', 'No matching records found.': 'Nisu pronađeni odgovarajući zapisi.', 'No Matching Vehicle Types': 'Nema odgovarajućih tipova vozila', 'No Members currently registered': 'Nema korisnika trenutno registrovanih', 'No Memberships currently defined': 'Trenutno nema definisanih članstava', 'No Memberships currently registered': 'Nema trenutno prijavljenog članstva', 'No Messages currently in InBox': 'Trenutno nema poruka u ulaznom sandučetu', 'No Messages currently in Outbox': 'Trenutno nema poruka u izlaznom sandučetu', 'No Messages currently in the Message Log': 'Nema poruka u dnevniku poruka', 'No messages in the system': 'Nema poruka u sistemu',
'No Milestones Found': 'Nema nađenih prekretnica', 'No Mobile Commons Settings currently defined': 'Mobilne postavke trenutno nisu definisane', 'No more items may be added to this request': 'Ne može se više stavki dodati na ovaj zahtjev', 'No morgues found': 'Nema nađenih mrtvačnica', 'No Need Types currently registered': 'trenutno nema registriranih tipova potrebe', 'No Needs currently registered': 'Nema trenutno registrovane potrebe', 'No Networks currently recorded': 'Nema trenutno zabilježenih mreža', 'No of Families Settled in the Schools': 'Broj porodica smještenih u školama', 'No of Families to whom Food Items are Available': 'Broj porodica za koje su dostupni prehrambeni artikli', 'No of Families to whom Hygiene is Available': 'Broj porodica kojima je higijena dostupna', 'No of Families to whom Non-Food Items are Available': 'Broj porodica za koje su dostupni neprehrambeni artikli', 'No of Female Students (Primary To Higher Secondary) in the Total Affectees': 'Broj ženskih učenika (osnovne i srednje škole) od ukupno pogođenih', 'No of Female Teachers & Other Govt Servants in the Total Affectees': 'Broj žena u nastavi i drugim vladinim uslugama u ukupno broju pogođenih', 'No of Male Students (Primary To Higher Secondary) in the Total Affectees': 'Broj muških učenika (osnovne i srednje škole) od ukupno pogođenih', 'No of Male Teachers & Other Govt Servants in the Total Affectees': 'Broj muškaraca u nastavi i drugim vladinim uslugama u ukupno broju pogođenih', 'No of Rooms Occupied By Flood Affectees': 'Broj soba koje su zauzele osobe pogođene poplavom', 'No Office Types currently registered': 'Trenutno nema registrovanih tipova kancelarija', 'No Offices currently registered': 'Nema trenutno registrovanih kancelarija', 'No Offices found!': 'Uredi nisu pronađeni!', 'No Open Tasks for %(project)s': 'Nema otvorenih zadataka za %(project)s', 'No options available': 'Nema dostupnih opcija', 'no options available': 'nema dostupnih opcija', 'No options currently available': 'Nema trenutno dostupnih opcija', 'No Orders registered': 'Nema registrovanih narudžbi', 'No Organization Domains currently registered': 'Trenutno nema registrovanih domena organizacija', 'No Organization Types currently registered': 'Trenutno nema registrovanih tipova organizacija', 'No Organizations currently registered': 'Nema trenutno registrovane organizacije', 'No Organizations for Project(s)': 'Nema organizacija za projekt(e)', 'No Organizations found for this Policy/Strategy': 'Nije nađena organizacija za ovu politiku/strategiju', 'No outputs defined': 'Nema nađenih izlaza', 'No Packets for Item': 'Nema paketa za artikle', 'No Packs for Item': 'Nema paketa za artikle', 'No Parsers currently connected': 'Nijedan parser nije trenutno povezan', 'No Partner Organizations currently registered': 'Nema trenutno registrovanih partnerskih organizacija', 'No Patients currently registered': 'Trenutno nema registrovanih pacijenata', 'No peers currently registered': 'Nema trenutno registrovanih saradnika', 'No Peers currently registered': 'Nema trenutno registrovanih suradnika', 'No pending registrations found': 'Na čekanju nema zahtjeva za registraciju', 'No pending registrations matching the query': 'Nema registracije na čekanju za vaš upit', 'No People currently committed': 'Trenutno nema posvecenih Ljudi', 'No People currently registered': 'Trenutno nema registrovanih ljudi', 'No People currently registered in this camp': 'Trenutno nema prijavljenih ljudi u ovom kampu', 'No People currently registered in this shelter': 'Za 
sad nema registrovanih u skloništu', 'No person record found for current user.': 'Nisu pronađeni lični podaci za trenutnog korisnika.', 'No Persons currently registered': 'Trenutno nema registrovanih osoba', 'No Persons currently reported missing': 'Trenutno nema registrovanih nestalih osoba', 'No Persons found': 'Osoba nije pronađena', 'No Photos found': 'Slike nisu nađene', 'No Picture': 'Nema Slike', 'No PoI Types currently available': 'Nema trenutno dostupnih tipova tačaka interesa', 'No Points of Interest currently available': 'Nema trenutno dostupnih tačaka interesa', 'No PoIs available.': 'Nema dostupnih tačaka interesa', 'No Policies or Strategies found': 'Nema nađenih politika ili strategija', 'No Population Statistics currently registered': 'Ne postoji registrovana statistika stanovništva', 'No Posts available': 'Nema dostupnih ubacivih tekstova', 'No posts currently available': 'Nema trenutno dostupnih ubacivih tekstova', 'No posts currently set as module/resource homepages': 'Nema ubacivih tekstova postavljenih kao početne stranice za modul/resurs', 'No posts currently tagged': 'Nema trenutno označenih ubacivih tekstova', 'No Posts currently tagged to this event': 'Ubacivi tekstovi nisu označeni za ovaj događaj', 'No Presence Log Entries currently registered': 'Trenutno nema registrovanih stavki zapisnika prisustva', 'No problem group defined yet': 'Nema još definisane grupe problema', 'No Problems currently defined': 'Nijedan problem trenutno nije definisan', 'No Professional Experience found': 'Nije nađeno profesionalno iskustvo', 'No Profiles currently have Configurations for this Layer': 'Nema profila konfigurisanih za ovaj sloj.', 'No Projections currently defined': 'Trenutno nema definisanih projekcija', 'No Projects currently registered': 'Treunutno nema registrovanih projekata', 'No projects currently registered': 'Treunutno nema registrovanih projekata', 'No Query currently defined': 'Trenutno nema definisanih upita', 'No Question Meta-Data': 'Nema metapodataka pitanja', 'No Rapid Assessments currently registered': 'Nema prijekih procjena trenutno registrovanih', 'No Ratings for Skill Type': 'Nema ocjena za tip vještine', 'No Received Items currently registered': 'Nema registritanih primljenih stavki', 'No Received Shipments': 'Nema primljenih pošiljki', 'No Records currently available': 'Trenutno nema nikakvih podataka', 'No records found': 'Nisu pronađeni zapisi', 'No records in this resource. Add one more records manually and then retry.': 'Nema zapisa u ovom resursu. 
Dodajte ručno jedan ili više zapisa i probajte ponovo.', 'No Records matching the query': 'Nema zapisa koji odgovaraju upitu', 'No records matching the query': 'Nema zapisa koji odgovaraju upitu', 'No records to review': 'Nema zapisa za pregled', 'No recovery reports available': 'Trenutno nema dostupnih izvještaja o pronalasku', 'No Regions currently registered': 'Trenutno nema registrovanih područja', 'No Relatives currently registered': 'Nijedan srodnik nije trenutno prijavljen', 'No report available.': 'Nema dostupnog izvještaja.', 'No report specified.': 'Nema navedenog izvještaja.', 'No reports available.': 'Niti jedan izvještaj nije dostupan.', 'No reports currently available': 'Trenutno nema dostupnih izvještaja', 'No repositories configured': 'Nema podešenih repozitorija', 'No Request Items currently registered': 'Nema trenutno registrovanih stavki koje se zahtijevaju', 'No Request Shipments': 'Nema zahtijeva pošiljki', 'No Request Templates': 'Nema predložaka zahtjeva', 'No Requests': 'Nema zahtjeva', 'No requests currently registered': 'Trenutno nema registrovanih zahtjeva', 'No requests found': 'Zahtjevi nisu pronađeni', 'No Resource Types defined': 'Nema definisanih tipova resursa', 'No Resources assigned to Incident': 'Resursi nisu dodijeljeni ovom incidentu', 'No resources configured yet': 'Još uvijek nema podešenih resursa', 'No resources currently registered': 'Trenutno nema registrovanih resursa', 'No resources currently reported': 'Trenutno nema prijavljenih sredstava', 'No Resources in Inventory': 'Nema resursa u zalihama', 'No Response': 'Nema odgovora', 'No Response Summaries Found': 'Nema sumarnih odgovora nađeno', 'No Responses currently registered': 'Trenutno nema registrovanih odgovora', 'No Restrictions': 'Bez ograničenja', 'No Rivers currently registered': 'Nema trenutno registrovanih rijeka', 'No role to delete': 'Nema uloge za brisanje', 'No roles currently assigned to this user.': 'Nema uloga dodijeljenih ovom korisniku.', 'No Roles currently defined': 'Uloga nije trenutno definirana', 'No Roles defined': 'Nijedna uloga nije definirana', 'No Rooms currently registered': 'Nema trenutno registriranih soba', 'No Scenarios currently registered': 'Nema trenutno prijavljenih scenarija', 'No School Districts currently registered': 'Trenutno nema registrovanih školskih rejona', 'No School Reports currently registered': 'Trenutno nema registriranih izvještaja o školama', 'No Seaports currently registered': 'Trenutno nema registrovanih luka', 'No Search saved': 'Nema snimljene pretrage', 'No Sections currently registered': 'Nema trenutno registrovanih odjela', 'No Sectors currently registered': 'Sektori trenutno nisu registrovani', 'No Sectors found for this Organization': 'Nisu pronađeni sektori za ovu organizaciju', 'No Sectors found for this Project': 'Nema nađenih sektora za ovaj projekt', 'No Sectors found for this Theme': 'Nema nađenih sektora za ovu temu', 'No Senders Whitelisted': 'Nema pošiljaoca na bijeloj listi', 'No Sent Items currently registered': 'Nema trenutno registrovanih poslanih stvari', 'No Sent Shipments': 'Nema poslanih pošiljki', 'No series currently defined': 'Trenutno nema definisanih serija', 'No service profile available': 'Nema dostupnog profila usluge', 'No Services currently registered': 'Trenutno nema registrovanih usluga', 'No Services found for this Organization': 'Nisu pronađene usluge za ovu organizaciju', 'No Settings currently defined': 'Nema trenutno definisanih postavki', 'No Shelter Services currently registered': 'Trenutno nema
registriranih usluga skloništa', 'No Shelter Statuses currently registered': 'Trenutno nema registrovanih statusa skloništa', 'No Shelter Types currently registered': 'Trenutno nema registrovanih tipova skloništa', 'No Shelters currently registered': 'Trenutno nema registrovanih skloništa', 'No Shipment Items': 'Nema stavki pošiljke', 'No Shipment Transit Logs currently registered': 'Trenutno nema registrovanih tranzitnih zapisa', 'No Shipment/Way Bills currently registered': 'Trenutno nema registrovanih dostava/putnih naloga', 'No Skill Types currently set': 'Nijedna vrsta vještina nije trenutno podešena', 'No Skills currently requested': 'Nema trenutno traženih vještina', 'No skills currently set': 'Nijedna vještina nije trenutno podešena', 'No Skills Required': 'Nema potrebnih vještina', 'No SMS currently in InBox': 'Trenutno nema SMS u ulaznom sandučetu', 'No SMS currently in Outbox': 'Trenutno nema SMS u izlaznom sandučetu', "No SMS's currently in Sent": 'Trenutno nema SMS u poslanom sandučetu', 'No Solutions currently defined': 'Nema definisanih rješenja', 'No Staff currently registered': 'Nema osoblja trenutno registrovanog', 'No staff or volunteers currently registered': 'Trenutno nema registrovanih članova osoblja ili volontera', 'No Staff Types currently registered': 'Nijedan tip osoblja nije trenutno registrovan', 'No status information available': 'Informacije o statusu nisu dostupne', 'No status information currently available': 'Nema trenutno dostupnih statusnih informacija', 'No Statuses currently registered': 'Trenutno nema registrovanih statusa', 'No stock adjustments have been done': 'Prilagođenja zaliha nisu obavljena', 'No stock counts have been done': 'Količine zaliha nisu obavljene', 'No Stock currently registered': 'Trenutno nema registrovanih zaliha', 'No Stock currently registered in this Warehouse': 'Nema zaliha registrovanih za ovo skladište', 'No Storage Bin Type currently registered': 'Nijedan tip korpe za smještaj nije trenutno registrovan', 'No Storage Bins currently registered': 'Trenutno nema registrovanih korpi za smještaj', 'No Storage Locations currently registered': 'Trenutno nema registrovanih lokacija za smještaj', 'No Subscription available': 'Nema dostupne pretplate', 'No Subsectors currently registered': 'Trenutno nema registrovanih podsektora', 'No Suppliers currently registered': 'Trenutno nema registrovanih dobavljača', 'No Support Requests currently registered': 'Trenutno nema registrovanih Zahtjeva za podršku', 'No Survey Answers currently entered.': 'Nema trenutno unesenih odgovora na ankete.', 'No Survey Answers currently registered': 'Trenutno nema registrovanih anketnih odgovora', 'No Survey Questions currently registered': 'Trenutno nema registrovanih anketnih pitanja', 'No Survey Sections currently registered': 'Trenutno nema registrovanih anketnih odjela', 'No Survey Series currently registered': 'Nema registrovanih Serija Anketa', 'No Survey Template currently registered': 'Trenutno nema registrovanog šablona za anketu', 'No Symbologies currently defined': 'Trenutno nema definisanih značenja simbola', 'No Symbologies currently defined for this Layer': 'Nema trenutno definisanih značenja simbola za ovaj sloj', 'No Sync': 'Bez sinhronizacije', 'No sync permitted!': 'Sinhronizacija nije dozvoljena!', 'No synchronization': 'Bez sinhronizacije', 'No tags currently defined': 'Trenutno nema definisanih oznaka', 'No Tasks Assigned': 'Nema dodijeljenih zadataka', 'No tasks currently assigned': 'Trenutno nema dodijeljenih zadataka', 'No tasks currently registered':
'Nema trenutno registriranih zadataka', 'No Tasks currently registered in this event': 'Nijedan zadatak trenutno nije registrovan u ovaj događaj', 'No Tasks currently registered in this incident': 'Trenutno nema zadataka registrovanih u ovom incidentu', 'No Tasks currently registered in this scenario': 'Trenutno nema registrovanih zadataka u ovom scenariju', 'No Tasks with Location Data': 'Nema Zadataka sa Podacima o Lokaciji', 'No Teams currently registered': 'Trenutno nema registrovanih timova', 'No template found!': 'Nema šablona pronađenog!', 'No Template Sections': 'Nema odjeljaka predložaka', 'No Themes currently defined': 'Nijedna Tema nije trenutno definisana', 'No Themes currently registered': 'Trenutno nema registrovanih tema', 'No Themes found for this Activity': 'Nema nađenih tema za ovu aktivnost', 'No Themes found for this Project': 'Nema nađenih tema za ovaj projekat', 'No Themes found for this Project Location': 'Nema nađenih tema za ovu lokaciju projekta', 'No Tickets currently registered': 'Trenutno nema registrovanih kartica', 'No Time Logged': 'Nema zabilježenog vremena', 'No Tours currently registered': 'Trenutno nema registrovanih tura', 'No Tracks currently available': 'Trenutno nema dostupnih zapisa', 'No translations exist in spreadsheet': 'Ne postoje prijevodi u ovoj tablici', 'No Tweets Available.': 'Nema dostupnih Tweet', 'No Tweets currently in InBox': 'Trenutno nema Tweet u ulaznom sandučetu', 'No Tweets currently in Outbox': 'Trenutno nema Tweet u izlaznom sandučetu', 'No Twilio Settings currently defined': 'Nema trenutno definisanih Twilio postavki', 'No Units currently registered': 'Trenutno nema regitrovanih jedinica', 'No units currently registered': 'Trenutno nema regitrovanih jedinica', 'No Users currently registered': 'Nema trenutno registrovanih korisnika', 'No users have taken a tour': 'Nijedan korisnik nije uzeo turu', 'No users with this role at the moment.': 'Nema korisnika s ovom ulogom u datom trenutku.', "No UTC offset found. Please set UTC offset in your 'User Profile' details. Example: UTC+0530": 'Nema pomaka od griničkog vremena. Molim navedite UTC pomak u korisničkim profilima. 
Primjer: UTC+0530', 'No Vehicle Details currently defined': 'Trenutno nisu definisani detalji vozila', 'No Vehicle Types currently registered': 'Trenutno nema registrovanih tipova vozila', 'No Vehicles currently assigned to this incident': 'Vozila nisu dodijeljena ovom incidentu', 'No Vehicles currently registered': 'Nema registrovanih vozila', 'No volunteer availability registered': 'nema registrirane dostupnosti volontera', 'No Volunteer Cluster Positions': 'Nema pozicija skupa volontera', 'No Volunteer Cluster Types': 'Nema tipova skupa volontera', 'No Volunteer Clusters': 'Nema skupova volontera', 'No volunteer information registered': 'nema registriranih informacija o volonterima', 'No Volunteers currently registered': 'Trenutno nema registrovanih volontera', 'No Warehouse Items currently registered': 'Nema registrovanih stavki skladišta', 'No Warehouses currently registered': 'Trenutno nema registrovanih skladišta', 'No Warehouses match this criteria': 'Nema skladišta koja odgovaraju ovom kriteriju', 'non-critical': 'ne-kritično', 'Non-medical Staff': 'Nemedicinsko osoblje', 'Non-structural Hazards': 'Nestrukturne opasnosti', 'None': 'Nema', 'none': 'Nijedno', 'None (no such record)': 'Nijedan (ne postoji takav zapis)', 'None of the above': 'Ništa od navedenog', 'Noodles': 'Tjestenine', 'Normal': 'Normalan', 'normal': 'normalno', 'Normal food sources disrupted': 'Uobičajen izvor hrane ometan', 'Normal Job': 'Normalni posao', 'Northern Cyprus': 'Sjeverni Kipar', 'Norway': 'Norveška', 'Nose, Angle': 'Nos, ugao', 'Nose, Curve': 'Nos, krivulja', 'Nose, shape': 'Nos, oblik', 'Nose, size': 'Nos, veličina', 'not accessible - no cached version available!': 'Nije dostupno - nema dostupne cache verzije!', 'not accessible - using cached version from': 'nije dostupno - koristi se cache verzija forme', 'Not allowed to Donate without matching to a Request!': 'Nije dopušteno donirati što nije usaglašeno s zahtjevom', 'Not Applicable': 'Nije primjenjivo', 'not applicable': 'nije primjenjivo', 'Not authorised!': 'Nije odobreno!', 'Not Authorised!': 'Nije dopušteno!', 'Not installed or incorrectly configured.': 'Nije instalirano ili nije pravilno konfigurirano.', 'not needed': 'nije potrebno', 'Not Parsed': 'Nije još analizirano', 'Not Possible': 'Nije moguće', 'Not Set': 'Nije postavljeno', 'not specified': 'nenavedeno', 'Not Started': 'Nije započelo', 'not writable - unable to cache GeoRSS layers!': 'nemoguće pisati - nije moguće čuvati GeoRSS slojeve!', 'not writable - unable to cache KML layers!': 'nemoguće pisati - nije moguće čuvati KML slojeve!', 'Not yet a Member of any Group': 'Još nije član ni jedne grupe', 'Not yet a Member of any Team': 'Još nije član ni jednog tima', 'Note': 'Bilješka', 'Note added': 'Napomena dodana', 'Note Details': 'Detalji bilješke', 'Note that this list only shows active volunteers. To see all people registered in the system, do a search from the home screen instead.': 'Primijetite da ova lista prikazuje samo aktivne volontere. Da biste vidjeli sve ljude registrirane u sistemu, pretražite sa ovog ekrana.', 'Note that this list only shows active volunteers. To see all people registered in the system, search from this screen instead': 'Primijetite da ova lista prikazuje samo aktivne volontere.
Da biste vidjeli sve ljude registrirane u sistemu, pretražite sa ovog ekrana', 'Note that when using geowebcache, this can be set in the GWC config.': 'Primijetite da kada se koristi geowebcache, ovo se može postaviti u GWC konfiguraciji.', 'Note Type': 'Vrsta bilješke', 'Note updated': 'Bilješka ažurirana', 'Note: Make sure that all the text cells are quoted in the csv file before uploading': 'Napomena: Obezbijedite da su sve ćelije teksta pod navodnicima u CSV datoteci prije postavljanja', 'Notes': 'Bilješke', 'Notice to Airmen': 'Bilješka za avijatičare', 'Notification frequency': 'Učestanost informisanja', 'Notification method': 'Metod informisanja', 'Notify': 'Informiši', 'num Zoom Levels': 'broj nivoa uvećanja', 'Number': 'Broj', 'Number of Activities': 'Broj aktivnosti', 'Number of additional beds of that type expected to become available in this unit within the next 24 hours.': 'Broj dodatnih ležaja tog tipa će, prema očekivanjima, biti dostupan u ovoj jedinici tokom sljedeća 24 sata.', 'Number of alternative places for studying': 'Broj alternativnih mjesta za studiranje', 'Number of available/vacant beds of that type in this unit at the time of reporting.': 'Broj dostupnih kreveta tog tipa u toj jedinici za vrijeme podnošenja izvještaja', 'Number of Barges': 'Broj šlepova', 'Number of Beneficiaries': 'Broj korisnika', 'Number of bodies found': 'Broj tijela pronađen', 'Number of Columns': 'Broj kolona', 'Number of Completed Assessment Forms': 'Broj završenih formulara ocjene', 'Number of deaths during the past 24 hours.': 'Broj smrtnih slučajeva u posljednja 24 sata.', 'Number of Disasters': 'Broj katastrofa', 'Number of discharged patients during the past 24 hours.': 'Broj otpuštenih pacijenata iz bolnice u posljednjih 24h.', 'Number of doctors': 'Broj doktora', 'Number of doctors actively working': 'Broj ljekara aktivno zaposlenih', 'Number of Facilities': 'Broj objekata', 'Number of houses damaged, but usable': 'Broj kuća oštećenih ali upotrebljivih', 'Number of houses destroyed/uninhabitable': 'Broj kuća uništenih/neuseljivih', 'Number of in-patients at the time of reporting.': 'Broj pacijenata u bolnici u vrijeme izvještavanja.', 'Number of Incidents': 'Broj incidenata', 'Number of Items': 'Broj stavki', 'Number of items': 'Broj stavki', 'Number of midwives actively working': 'Broj medicinskih babica aktivno zaposlenih', 'Number of newly admitted patients during the past 24 hours.': 'Broj novih primljenih pacijanata u posljednja 24 sata.', 'Number of non-medical staff': 'Broj osoblja koji nisu ukljuceni u zdravstvo', 'Number of nurses': 'Broj medicinskih sestara', 'Number of nurses actively working': 'Broj medicinskih sestara aktivno zaposlenih', 'Number of Patients': 'Broj pacijenata', 'Number of People Affected': 'Broj ljudi na koje je događaj uticao', 'Number of People Dead': 'Broj umrlih ljudi', 'Number of People Injured': 'Broj povrijeđenih ljudi', 'Number of People Required': 'Broj potrebnih ljudi', 'number of planes': 'broj aviona', 'Number of private schools': 'Broj privatnih škola', 'Number of public schools': 'Broj javnih školskih ustanova', 'Number of religious schools': 'Broj religijskih škola', 'Number of residential units': 'Broj stambenih jedinica', 'Number of residential units not habitable': 'Broj neuseljivih prebivališnih jedinica', 'Number of Resources': 'Broj resursa', 'Number of Responses': 'Broj odgovora', 'Number of Rows': 'Broj Redova', 'Number of schools damaged but usable': 'Ukupni broj škola oštećenih ali upotrebljivih', 'Number of schools 
destroyed/uninhabitable': 'Broj škola uništenih/neuseljivih', 'Number of schools open before disaster': 'Broj škola otvoren prije katastrofe', 'Number of schools open now': 'Broj škola trenutno otvorenih', 'Number of teachers affected by disaster': 'Broj nastavnika ugroženih zbog katastrofe?', 'Number of teachers before disaster': 'Broj nastavnika prije katastrofe', 'Number of Tugboats': 'Broj skela', 'Number of vacant/available beds in this facility. Automatically updated from daily reports.': 'Broj slobodnih/dostupnih kreveta na ovoj lokaciji. Automatski ažurirano iz dnevnih izvještaja', 'Number of vacant/available beds in this hospital. Automatically updated from daily reports.': 'Broj praznih/slobodnih kreveta u ovoj bolnici. Automatski ažurirano iz dnevnih izvještaja.', 'Number of vacant/available units to which victims can be transported immediately.': 'Broj praznih/dostupnih jedinica u koje žrtve mogu biti odmah transportovane.', 'Number or code used to mark the place of find, e.g. flag code, grid coordinates, site reference number or similar (if available)': 'Broj ili kod koji se koristi da bi se oznacilo mjesto nalazišta, npr. oznaka zastave, koordinate, referentni broj položaja ili slično (ako je dostupno)', 'Number or Label on the identification tag this person is wearing (if any).': 'Broj ili obilježje na identifikacijskoj oznaci koju osoba nosi (ukoliko postoji).', 'Number/Percentage of affected population that is Female & Aged 0-5': 'Broj/Procent pogođene populacije koju čine žene od 0-5 godina', 'Number/Percentage of affected population that is Female & Aged 13-17': 'Broj/procenat pogođene ženske populacije starosti od 13 do 17 godina', 'Number/Percentage of affected population that is Female & Aged 18-25': 'Broj/Procenat pogođene populacije koja je ženskog spola i starosti 18-25', 'Number/Percentage of affected population that is Female & Aged 26-60': 'Broj/procenat pogođene ženske populacije od 26-60 godina', 'Number/Percentage of affected population that is Female & Aged 6-12': 'Broj/postotak pogođene ženske populacije starosne dobi 6-12 godina', 'Number/Percentage of affected population that is Female & Aged 61+': 'Broj/Procenat pogođene populacije koja je ženskog pola i starosti 61+ godina', 'Number/Percentage of affected population that is Male & Aged 0-5': 'Broj / postotak zahvaćenog stanovništva koji su muškarci i godina 0-5', 'Number/Percentage of affected population that is Male & Aged 13-17': 'Broj/postotak pogođene muške populacije starosne dobi od 13-17 godina', 'Number/Percentage of affected population that is Male & Aged 18-25': 'Broj/ Procent zahvaćene populacije muškog spola u dobi od 18-25', 'Number/Percentage of affected population that is Male & Aged 26-60': 'Broj/Procent pogođenog stanovništva kojeg čine muškarci od 26-60 godina starosti', 'Number/Percentage of affected population that is Male & Aged 6-12': 'Broj/Postotak zahvaćenih muskih osoba izmedu 6 i 12 godina', 'Number/Percentage of affected population that is Male & Aged 61+': 'Broj/procenat muške populacije koja je povrijeđena i imaju 61 i više godina', 'Numbers Only': 'Samo brojevi', 'Numeric': 'Brojčano', 'Nurse': 'Medicinska sestra', 'Nursery Beds': 'Kreveti u jaslicama', 'Nursing Information Manager': 'Upravljanje informacijama o medicinskoj njezi', 'Nutrition': 'Prehrana', 'Nutrition problems': 'Problemi u ishrani', 'NZSEE Level 1': 'NZSEE Nivo 1', 'NZSEE Level 2': 'NZSEE Nivo 2', 'Object': 'Objekat', 'Objectives': 'Ciljevi', 'Observer': 'Posmatrač', 'Obsolete': 'Zastarjelo', 'obsolete': 
'zastario', 'Obstetrics/Gynecology': 'Porodilište/Ginekologija', 'OCR Form Review': 'Pregled OCR forme', 'OCR module is disabled. Ask the Server Administrator to enable it.': 'OCR modul je isključen. Pitajte serverskog administratora da ga omogući.', 'OCR review data has been stored into the database successfully.': 'Podaci za OCR pregled su uspješno stavljeni u bazu podataka', 'Office': 'Kancelarija', 'Office added': 'Kancelarija dodana', 'Office Address': 'Adresa kancelarije', 'Office deleted': 'Kancelarija obrisan', 'Office Details': 'Detalji o kancelariji', 'Office Phone': 'Službeni telefon', 'Office Type': 'Tip kancelarije', 'Office Type added': 'Dodan tip kancelarije', 'Office Type deleted': 'Obrisan tip kancelarije', 'Office Type Details': 'Detalji tipa kancelarije', 'Office Type updated': 'Ažuriran tip kancelarije', 'Office Types': 'Tipovi kancelarija', 'Office updated': 'Kancelarija ažurirana', 'Offices': 'Kancelarije', 'Offices & Warehouses': 'Uredi i skladišta', 'Offline Sync': 'Vanmrežna sinhronizacija', 'Offline Sync (from USB/File Backup)': 'Vanmrežna Sinhronizacija (sa USB-a/Pomoćnih Dokumenata)', 'Oil Terminal Depth': 'Dubina naftnog terminala', 'Old': 'Star', 'Older people as primary caregivers of children': 'Stariji ljudi, kao primarni staratelji djece', 'Older people in care homes': 'Stariji ljudi u domovima', 'Older people participating in coping activities': 'Stariji ljudi koji učestvuju u aktivnostima prilagođavanja', 'Older people with chronical illnesses': 'Stariji ljudi s hroničnim bolestima', 'Older person (>60 yrs)': 'Starije osobe (preko 60 god.)', 'on': 'uključeno', 'on %(date)s': 'na %(date)s', 'On by default?': 'Uključeno prema podrazumijevanoj vrijednosti?', 'On by default? (only applicable to Overlays)': 'Automatski postavljeno na uključeno? (Jedino se može primjeniti na Preglede)', 'On Hold': 'Na čekanju', 'On Order': 'U narudžbi', 'On Scene': 'Na sceni', 'On-site Hospitalization': 'Hospitalizacija na licu mjesa', 'once': 'jednom', 'One time cost': 'Jednokratna cijena', 'One Time Cost': 'Jednokratni trošak', 'One-time': 'Jedanput', 'One-time costs': 'Jednokratni troškovi', "Only Categories of type 'Asset' will be seen in the dropdown.": "Samo kategorije tipa 'Sredstvo' će se vidjeti u padajućoj listi.", "Only Categories of type 'Vehicle' will be seen in the dropdown.": "Samo kategorije tipa 'Vozilo' će se vidjeti u padajućoj listi.", "Only Items whose Category are of type 'Vehicle' will be seen in the dropdown.": "Samo stavke čija je kategorija tipa 'Vozilo' će se vidjeti u padajućoj listi.", 'Only showing accessible records!': 'Prikazujem samo pristupačne zapise!', 'Only use this button to accept back into stock some items that were returned from a delivery to beneficiaries who do not record the shipment details directly into the system': 'Koristite ovo dugme za prihvatanje nazad u zalihu stavki koje su vraćene iz isporuke korisnjicima koji nisu zabilježili detalje o isporuci direktno u sistem', 'Only use this button to confirm that the shipment has been received by a destination which will not record the shipment directly into the system': 'Koristite ovo dugme da potvrdite da je pošiljka stigla na odredište bez bilježenja pošiljke direktno u sistem', 'Oops! something went wrong on our side.': 'Oops! Nešto je krenulo po zlu.', 'Oops! Something went wrong...': 'Ups! 
Nešto nije u redu...', 'Opacity': 'Neprozirnost', 'Opacity (1 for opaque, 0 for fully-transparent)': 'Neprozirnost (1 za neprozirno, 0 potpuno providno)', 'Open': 'Otvori', 'Open area': 'Otvoreno područje', 'Open Chart': 'Otvori dijagram', 'open defecation': 'otvorena defekacija', 'Open in New Tab': 'Otvori u novoj kartici', 'Open Incidents': 'Otvori incidente', 'Open Map': 'Otvori mapu', 'Open recent': 'Otvorni skorašnje', 'Open Report': 'Otvori izvještaj', 'Open Table': 'Otvori tabelu', 'Open Tasks for %(project)s': 'Otvoreni zadaci za %(project)s', 'Open Tasks for Project': 'Otvoreni zadaci za projekt', 'Opening Times': 'Radna vremena', 'OpenStreetMap Layer': 'OpenStreetMap sloj', 'OpenStreetMap OAuth Consumer Key': 'OpenStreetMap OAuth korisnički ključ', 'OpenStreetMap OAuth Consumer Secret': 'OpenStreetMap OAuth korisnička tajna lozinka', 'OpenWeatherMap Layer': 'OpenWeatherMap sloj', 'Operating Rooms': 'Operacione sale', 'Operational': 'Operativno', 'Opportunities to Volunteer On-Site?': 'Prilike za volontera na lokaciji?', 'Opportunities to Volunteer Remotely?': 'Prilike za volotera za udaljeni rad?', 'Option': 'Opcija', 'Option Other': 'Opcija druga', 'Optional': 'Neobavezno', 'optional': 'opcionalno', 'Optional link to an Incident which this Assessment was triggered by.': 'Izborni link na incident koji je potaknuo ovu procjenu.', 'Optional password for HTTP Basic Authentication.': 'Opcionalna lozinka za HTTP osnovnu autentifikaciju', 'Optional selection of a background color.': 'Izbor boje pozadine', 'Optional selection of a MapServer map.': 'Opcionalan izbor MapSever karte.', 'Optional selection of an alternate style.': 'Neobavezna selekcija alternativnog stila', 'Optional Subject to put into Email - can be used as a Security Password by the service provider': 'Neobavezni predmet za staviti u Email - može biti korišten kao sigurnosna šifra od strane pružatelja usluga.', 'Optional username for HTTP Basic Authentication.': 'Opcionalno ime korisnika za HTTP osnovnu autentifikaciju', 'Optional. If you wish to style the features based on values of an attribute, select the attribute to use here.': 'Opcionalno. Ako želite stilizirati karakteristike na osnovu vrijednosti atributa, ovdje izaberite atribute koje ćete koristiti.', 'Optional. In GeoServer, this is the Workspace Namespace URI (not the name!). Within the WFS getCapabilities, the workspace is the FeatureType Name part before the colon(:).': 'Opcionalno. U GeoServer-u, ovo je imenski prostor (ne ime!) radnog prostora URI. U sklopu WFS getCapabilities, radni prostor je FeatureType ime prije dvotačke (:).', 'Optional. In GeoServer, this is the Workspace Namespace URI (not the name!). Within the WFS getCapabilities, this is the FeatureType Name part before the colon(:).': 'Opcionalno. U GeoServer-u, ovo je URI imenskog prostora (ne ime!) . U sklopu WFS getCapabilities, ovo je dio tipa osobine prije dvotačke (:).', 'Optional. In GeoServer, this is the Workspace Namespace URI. Within the WFS getCapabilities, this is the FeatureType Name part before the colon(:).': 'Opcionalno. U GeoServer-u, ovo je imenski prostor radnog prostora URI. U sklopu WFS getCapabilities, ovo je dio tipa osobine prije dvotačke (:).', 'Optional. The name of an element whose contents should be a URL of an Image file put into Popups.': 'Izborno. Naziv elementa čiji sadržaji trebaju biti URL slikovne datoteke stavljene u Popup-e.', 'Optional. The name of an element whose contents should be put into Popups.': 'Opcionalno. 
Ime elementa čiji sadržaj bi trebao biti unutar iskočnih prozora.', "Optional. The name of the geometry column. In PostGIS this defaults to 'the_geom'.": "Opcionalno. Ime geometrijske kolone. U postGIS ovo je automatski 'the_geom'.", 'Optional. The name of the schema. In Geoserver this has the form http://host_name/geoserver/wfs/DescribeFeatureType?version=1.1.0&;typename=workspace_name:layer_name.': 'Opcionalno. Ime šeme. Na Geoserveru ovo ima formu http://host_name/geoserver/wfs/DescribeFeatureType?version=1.1.0&;typename=workspace_name:layer_name.', 'Options': 'Opcije', 'or': 'ili', 'Or add a new language code': 'Ili dodaj novu šifru jezika', 'or import from csv file': 'ili uvoz iz CSV dokumenta', 'OR Reason': 'OR razlog', 'OR Status Reason': 'OR Status razlog', 'Order': 'Narudžba', 'Order canceled': 'Narudžba otkazana', 'Order Created': 'Narudžba kreirana', 'Order Details': 'Detalji narudžbe', 'Order Due %(date)s': 'Rok narudžbe %(date)s', 'Order Item': 'Stavka narudžbe', 'Order updated': 'Narudžba ažurirana', 'Ordered list ... (#TODO [String])': 'Uerđena lista ... (#TODO [String])', 'Orders': 'Narudžbe', 'Organization': 'Organizacija', 'Organization added': 'Dodana organizacija', 'Organization added to Policy/Strategy': 'Organizacija dodana u politiku/strategiju', 'Organization added to Project': 'Organizacija dodana u projekt', 'Organization deleted': 'Obrisana organizacija', 'Organization Details': 'Detalji organizacije', 'Organization Domain added': 'Domena organizacije dodana', 'Organization Domain deleted': 'Domena organizacije obrisana', 'Organization Domain Details': 'Detalji domene organizacije', 'Organization Domain updated': 'Domena organizacije ažurirana', 'Organization Domains': 'Domene organizacije', 'Organization Group': 'Grupa organizacija', 'Organization group': 'Grupe organizacija', 'Organization Needs': 'Potrebe organizacije', 'Organization Needs added': 'Potrebe organizacije dodane', 'Organization Needs deleted': 'Potrebe organizacije obrisane', 'Organization Needs updated': 'Potrebe organizacije ažurirane', 'Organization Registry': 'Registar Organizacija', 'Organization removed from Policy/Strategy': 'Organizacija uklonjena iz politike/strategije', 'Organization removed from Project': 'Organizacija uklonjena sa projekta', 'Organization Type': 'Tip organizacije', 'Organization Type added': 'Vrsta organizacije dodana', 'Organization Type deleted': 'Vrsta organizacije obrisana', 'Organization Type Details': 'Detalji tipa organizacije', 'Organization Type updated': 'Vrsta organizacije ažurirana', 'Organization Types': 'Tipovi organizacije', 'Organization Units': 'Organizacione jedinice', 'Organization updated': 'Ažurirana organizacija', 'Organization(s)': 'Organizacije', 'Organization/Branch': 'Organizacija/Ogranak', 'Organization/Supplier': 'Organizacija/dobavljač', 'Organization:': 'Organizacija:', 'Organizations': 'Organizacije', 'Organizations / Teams / Facilities': 'Organizacije / Timovi / Objekti', 'Organized By': 'Organizovao', 'Origin': 'Porijeklo', 'Origin of the separated children': 'Porijeklo odvojene djece', 'Original': 'Izvorno', 'Original Quantity': 'Izvorna količina', 'Original Value per Pack': 'Izvorna vrijednost po paketu', 'OSM file generation failed!': 'Generisanje OSM datoteke neuspjelo!', 'OSM file generation failed: %s': 'Generisanje OSM datoteke neuspjelo: %s', 'Other': 'Ostalo', 'other': 'drugo', 'Other (describe)': 'Ostalo (opis)', 'Other (specify)': 'Ostalo (navedi)', 'Other activities of boys 13-17yrs': 'Ostale aktivnosti dječaka od 13-17 
godina', 'Other activities of boys 13-17yrs before disaster': 'Druge aktivnosti dječaka uzrasta od 13 do 17 godina, prije nesreće', 'Other activities of boys <12yrs': 'Druge aktivnosti dječaka mlađih od 12 godina', 'Other activities of boys <12yrs before disaster': 'Druge aktivnosti dječaka <12 godina prije nesreće', 'Other activities of girls 13-17yrs': 'Druge aktivnosti djevojčica 13-17 godina', 'Other activities of girls 13-17yrs before disaster': 'Ostale aktivnosti djevojčica 13-17 godina prije katastrofe', 'Other activities of girls<12yrs': 'Druge aktivnosti djevojčica <12 godina', 'Other activities of girls<12yrs before disaster': 'Ostale aktivnosti djevojčica <12 godina prije katastrofe', 'Other Address': 'Druga Adresa', 'Other alternative infant nutrition in use': 'Druga alternativa u prehrani dojenčadi u upotrebi', 'Other alternative places for study': 'Alternativna mjesta za učenje', 'Other assistance needed': 'Potrebna druga pomoć', 'Other assistance, Rank': 'Ostale vrste pomoći, Poredak', 'Other current health problems, adults': 'Ostali aktuelni zdravstveni problemi odraslih', 'Other current health problems, children': 'Drugi trenutni zdravstveni problemi, djeca', 'Other Details': 'Drugi detalji', 'Other events': 'Drugi događaji', 'Other Evidence': 'Ostali dokazi', 'Other factors affecting school attendance': 'Drugi faktori koji utiču na pohađanje škole', 'Other Faucet/Piped Water': 'Druga voda iz slavine/vodovoda', 'Other Inventories': 'Druge zalihe', 'Other Isolation': 'Druge izolacije', 'Other major expenses': 'Drugi veći troškovi', 'Other Name': 'Drugo ime', 'Other non-food items': 'Ostali neprehrambeni artikli', 'Other recommendations': 'Ostale preporuke', 'Other residential': 'Ostala prebivališta', 'Other school assistance received': 'Druga nastavna pomoć primljena', 'Other school assistance, details': 'Asistencija ostalih škola, detalji', 'Other school assistance, source': 'Ostale pomoći u školi, izvor', 'Other settings can only be set by editing a file on the server': 'Ostale postavke mogu biti postavljene jedino uređivanjem datoteke na serveru', 'Other side dishes in stock': 'ostali prilozi u zalihama', 'Other types of water storage containers': 'Drugi tipovi spremnika za vodu', 'Other Users': 'Drugi korisnici', 'Other ways to obtain food': 'Drugi načini za dobavu hrane', 'Others': 'Ostali', 'Out': 'Van', 'Outbound Mail settings are configured in models/000_config.py.': 'Poruke koje se šalju su konfigurisane u models/000_config.py.', 'Outbox': 'Za slanje', 'Outcomes, Impact, Challenges': 'Izlazi, utjecaj, izazovi', 'Outgoing SMS handler': 'Rukovodilac odlaznih SMS poruka', 'Outgoing SMS Handler': 'Upravljač izlaznog SMS', 'Output': 'Izlaz', 'Output added': 'Izlaz dodan', 'Output deleted': 'Izlaz uklonjen', 'Output updated': 'Izlaz ažuriran', 'Outputs': 'Izlazi', 'oval': 'ovalno', 'over one hour': 'preko sat', 'Overall Hazards': 'Ukupni rizici', 'Overall status of the clinical operations.': 'Ukupni status kliničkih operacija.', 'Overall status of the facility operations.': 'Ukupni status operacija objekta', 'Overhead falling hazard': 'Opasnost od predmeta koji padaju s visine', 'Overland Flow Flood': 'Poplava kopnenog toka', 'Overlays': 'Preklopi', 'Overview': 'Pregled', 'Owned By (Organization/Branch)': 'Vlasnik (organizacija/grana)', 'Owned Records': 'Broj zapisa u vlasništvu', 'Owned Resources': 'Posjedovani resursi', 'Ownership': 'Vlasništvo', 'Owning Organization': 'Vlasnička organizacija', 'Pacific Islands Framework for Action on Climate Change. 
Applicable to projects in Pacific countries only': 'Okvir pacifičkih ostrva za akcije o klimatskim promjenama. Primjenjivo samo na projekte u pacifičkim zemljama', 'Pack': 'Paket', 'pack of 10': 'Paket od 10', 'Packet': 'Paket', 'Packs': 'Paketi', 'Page': 'Stranica', 'painted': 'nacrtano', 'Pan Map: keep the left mouse button pressed and drag the map': 'Prevlačenje mape: držite lijevo dugme miša pritisnuto i vucite mapu', 'Papua New Guinea': 'Papua Nova Gvineja', 'Paraguay': 'Paragvaj', 'Parameters': 'Parametri', 'Parapets, ornamentation': 'Parapeti, ukrašavanja', 'Parent': 'Roditelj', 'Parent Item': 'Nadređena stavka', "Parent level should be higher than this record's level. Parent level is": 'Roditeljski nivo bi trebao biti viši od nivoa ovog zapisa. Roditeljski nivo je', 'Parent needs to be of the correct level': 'Roditelj treba da bude na odgovarajućoj razini', 'Parent needs to be set': 'Roditelj treba biti postavljen', 'Parent needs to be set for locations of level': 'Roditelj treba bit postavljen na lokacijama nivoa', 'Parent Office': 'Matični ured', 'Parents/Caregivers missing children': 'Roditelji/Staratelji djece koja su nestala', 'Parking Area': 'Parking područje', 'Parking/Tarmac Space Capacity': 'Kapacitet parking prostora', 'Parking/Tarmac Space Units': 'Jedinice parking prostora', 'Parse': 'Raščlani', 'Parsed': 'Raščlanjeno', 'Parser connected': 'Povezano kroz parser', 'Parser Connection Details': 'Detalji parserske konekcije', 'Parser connection removed': 'Uklonjena parserska konekcija', 'Parser connection updated': 'Parserska konekcija je ažurirana', 'Parser Connections': 'Parserske konekcije', 'Parsing Settings': 'Postavke parsera', 'Parsing Status': 'Status parsera', 'part': 'dio', 'Part of the URL to call to access the Features': 'Dio URL koji se zove za pristup objektima', 'Partial': 'Djelimično', 'Partial Database Synchronization': 'Djelomična sinhronizacija sa bazom podataka', 'Participant': 'Učesnik', 'Participant added': 'Učesnik dodan', 'Participant deleted': 'Učesnik obrisan', 'Participant Details': 'Detalji učesnika', 'Participant updated': 'Učesnik ažuriran', 'Participants': 'Učesnici', 'Partner added': 'Partner dodan', 'Partner deleted': 'Partner izbrisan', 'Partner Details': 'Detalji partnera', 'Partner Organization added': 'Organizacija partnera dodana', 'Partner Organization deleted': 'Organizacija partnera obrisana', 'Partner Organization Details': 'Detalji partnersk organizacije', 'Partner Organization updated': 'Organizacija partnera ažurirana', 'Partner Organizations': 'Partnerske organizacije', 'Partner updated': 'Partner ažuriran.', 'Partners': 'Partneri', 'Pashto': 'Pašto', 'Pass': 'Prolaz', 'Passport': 'Pasoš', 'Password': 'Lozinka', "Password fields don't match": 'Polja za lozinku se ne podudaraju', 'Password to use for authentication at the remote site.': 'Lozinka za prijavu na udaljeni sajt-', 'Path': 'Putanja', 'Pathology': 'Patologija', 'Patient': 'Pacijent', 'Patient added': 'Pacijent dodan', 'Patient deleted': 'Pacijent obrisan', 'Patient Details': 'Detalji o pacijentu', 'Patient Tracking': 'Praćenje pacijenta', 'Patient Transportation Ambulance': 'Ambulanta za transport pacijenata', 'Patient updated': 'Pacijent ažuriran', 'Patients': 'Pacijenti', 'PDF File': 'PDF datoteka', 'Pediatric ICU': 'Pedijatrijska intenzivna njega', 'Pediatric Psychiatric': 'Pedijatrijsko psihijatrijsko', 'Pediatrics': 'Doktor za bolesti i povrijede djece i maloljetnika', 'Peer': 'Saradnik', 'Peer added': 'Saradnik dodan', 'Peer deleted': 'Saradnik obrisan', 'Peer 
Details': 'Detalji o saradniku', 'Peer not allowed to push': 'Nije dozvoljeno gurati saradnika', 'Peer Registration': 'Registracija saradnika', 'Peer Registration Details': 'Detalji registracije saradnika', 'Peer Registration Request': 'Zahtjev za registraciju saradnika', 'Peer registration request added': 'Dodat zahtjev za registraciju saradnika', 'Peer registration request deleted': 'Zahtjev za registracijom saradnika je obrisan', 'Peer registration request updated': 'Zahtjev za registraciju saradnika ažuriran', 'Peer Type': 'Tip saradnika', 'Peer UID': 'UID saradnika', 'Peer updated': 'Ažuriran saradnik', 'Peers': 'Saradnici', 'pending': 'čeka', 'Pending': 'U toku', 'Pending Requests': 'Zahtjevi na čekanju', 'people': 'ljudi', 'People': 'Ljudi', 'People added to Commitment': 'Ljudi dodani u zaduženje', 'People Needing Food': 'Ljudi koji trebaju hranu', 'People Needing Shelter': 'Ljudi kojima je potrebno sklonište', 'People Needing Water': 'Ljudi koji trebaju vodu', 'People removed from Commitment': 'Osobe odstranjene iz zaduženja', 'People Trapped': 'Zarobljeni ljudi', 'People with chronical illnesses': 'Ljudi s hroničnim bolestima', 'per': 'po', 'Percentage': 'Procenat', 'Performance Rating': 'Ocjena izvedbe', 'Permanent Home Address': 'Stalna kućna adresa', 'Person': 'Osoba', 'Person 1': 'Osoba broj 1', 'Person 1, Person 2 are the potentially duplicate records': 'Osoba 1, Osoba 2 su mogući dupli zapisi', 'Person 2': 'Osoba 2', 'Person added': 'Osoba dodana', 'Person added to Commitment': 'Osoba dodana zaduženju', 'Person added to Group': 'Osoba dodana u grupu', 'Person added to Team': 'Osoba dodana u tim', 'Person Data': 'Lični podaci', 'Person De-duplicator': 'Deduplikator osoba', 'Person deleted': 'Osoba obrisana', 'Person Details': 'Detalji osobe', 'Person details updated': 'Detalji o osobi ažurirani', 'Person Entity': 'Jedinka osobe', 'Person Finder': 'Nalazač osoba', 'Person found': 'Osoba pronađena', 'Person interviewed': 'Osoba ispitana', 'Person Management': 'Upravljanje osobljem', 'Person missing': 'Nedostaje osoba', 'Person must be specified!': 'Osoba se mora navesti!', 'Person or OU': 'Osoba ili OJ', 'Person Registry': 'Registar osoba', 'Person removed from Commitment': 'Osoba odstranjena iz zaduženja', 'Person removed from Group': 'Osoba odstranjena iz grupe', 'Person removed from Team': 'Osoba odstranjena iz tima', 'Person reporting': 'Osoba koja je prijavila', 'Person Transportation Tactical Vehicle': 'Lično transportno taktičko vozilo', 'Person updated': 'Osoba ažurirana', 'Person who has actually seen the person/group.': 'Osoba koja je zapravo vidjela osobu/grupu.', "Person's Details": 'Detalji osobe', "Person's Details added": 'Detalji o osobi dodani', "Person's Details deleted": 'Detalji o osobi obrisani', "Person's Details updated": 'Detalji o osobi ažurirani', 'Person.': 'Osoba', 'Person/Group': 'Osoba/Grupa', 'Personal': 'Lično', 'Personal Data': 'Lični podaci', 'Personal Effects': 'Lični efekti', 'Personal Effects Details': 'Detalji ličnih uticaja', 'Personal impact of disaster': 'Lični utjecaj katastrofe', 'Personal Map': 'Lična mapa', 'Personal Profile': 'Lični profil', 'Persons': 'Osobe', 'Persons in institutions': 'Osobe u institucijama', 'Persons per Dwelling': 'Osoba po stambenoj jedinici', 'Persons with disability (mental)': 'Osobe sa (mentalnim) invaliditetom', 'Persons with disability (physical)': 'Osobe s invaliditetom (tjelesnim)', "Persons' Details": 'Detalji o osobama', 'Philippines': 'Filipini', 'Phone': 'Telefon', 'Phone #': 'Telefon #', 'Phone 1': 
'Telefon 1', 'Phone 2': 'Telefon 2', 'Phone number is required': 'Potreban je telefonski broj', "Phone number to donate to this organization's relief efforts.": 'Telefonski broj na koji se donira pomoć žrtvama ove organizacije.', 'Phone/Business': 'Telefon/ Posao', 'Phone/Emergency': 'Telefon/Hitni', 'Phone/Exchange': 'Telefon/razmjena', 'Phone/Exchange (Switchboard)': 'Telefonska centrala', 'Photo': 'Fotografija', 'Photo added': 'Fotografija dodana', 'Photo deleted': 'Fotografija obrisana', 'Photo Details': 'Detalji o fotografiji', 'Photo Taken?': 'Fotografija napravljena?', 'Photo updated': 'Fotografija ažurirana', 'Photograph': 'Fotografija', 'Photos': 'Fotografije', 'Physical': 'Fizički', 'Physical Description': 'Fizički opis', 'Physical Safety': 'Fizička sigurnost', 'Picture': 'Slika', 'Picture upload and finger print upload facility': 'Mogućnost uploada slike i otiska prsta', 'piece': 'Dio', 'PIFACC Priorities': 'PIFACC Prioriteti', 'PIFACC-1: Implementing Tangible, On-Ground Adaptation Measures': 'PIFACC-1: Implementacija realnih mjerenja s adaptacijom na zemljičtu', 'PIFACC-2: Governance and Decision Making': 'PIFACC-2: Vlada i donošenje odluka', 'PIFACC-3: Improving our understanding of climate change': 'PIFACC-3: Poboljšanje razumijevanja klimatskih promjena', 'PIFACC-4: Education, Training and Awareness': 'PIFACC-4: Obrazovanje, obuka i informisanost', 'PIFACC-5: Mitigation of Global Greenhouse Gas Emissions': 'PIFACC-5: Smanjenje globalnog ispuštanja gasova koji izazivaju efekat staklene bašte', 'PIFACC-6: Partnerships and Cooperation': 'PIFACC-6: Partnerstvo i saradnja', 'PIL (Python Image Library) not installed': 'PIL (Python Image Library) nije instalirana', 'PIN': 'PIN', 'PIN number': 'PIN broj', 'PIN number ': 'PIN broj - osobni identifikacijski broj ', 'Pipe': 'Cijev', 'pit': 'jama', 'pit latrine': 'septička jama poljskog zahoda', 'PL Women': 'PL žene', 'Place': 'Mjesto', 'Place for solid waste disposal': 'Mjesto za ostavljanje čvrstog smeća', 'Place of Birth': 'Mjesto rođenja', 'Place of find': 'Mjesto pronalaska', 'Place of Recovery': 'Mjesto oporavka', 'Place on Map': 'Mjesto na karti', 'Places for defecation': 'Mjesta za vršenje nužde', 'Places the children have been sent to': 'Mjesta gdje su djeca poslana', 'Planned': 'Planirano', 'Planned %(date)s': 'Planirano %(date)s', 'Planned Procurement': 'Planirana nabava', 'Planned Procurement Item': 'Planirana stavka nabavke', 'Planned Procurements': 'Planirane nabave', 'Playing': 'Izvršava', 'Please choose a type': 'Odaberite tip', "Please come back after sometime if that doesn't help.": 'Molim vratite se nakon nekog vremena, ako to ne pomogne.', 'Please correct all errors.': 'Molim da ispravite sve greške.', 'Please do not remove this sheet': 'Molim da ne brišete ovaj list', 'Please enter a %(site)s': 'Molimo unesite %(site)s', 'Please enter a %(site)s OR an Organization': 'Molimo unesite %(site)s ILI organizaciju', 'Please enter a first name': 'Molimo unesite ime', 'Please enter a last name': 'Molim, unesite prezime', 'Please enter a number only': 'Molim unesite samo broj', 'Please enter a site OR a location': 'Molimo unesite mjesto ILI lokaciju', 'Please enter a valid email address': 'Unesite važeću adresu elektronske pošte', 'Please enter an Organization/Supplier': 'Molimo unesite organizaciju/dobaljača', 'Please enter details of the Request': 'Molim unesite detalje zahtjeva', 'Please enter request details here.': 'Molim unesite detalje zahtjeva ovdje.', 'Please enter the details on the next screen.': 'Molim unesite 
detalje zahtjeva na sljedećem ekranu.', 'Please enter the first few letters of the Person/Group for the autocomplete.': 'Molimo unesite prvih nekoliko slova Osobe/Grupe za automatsko popunjavanje.', 'Please enter the recipient': 'Molimo dodajte primatelja', 'Please enter the recipient(s)': 'Molimo unesite primatelja', 'Please fill this!': 'Molim Vas popunite ovo!', 'Please give an estimated figure about how many bodies have been found.': 'Molimo dajte okvirnu procjenu koliko je tijela pronađeno.', "Please provide as much detail as you can, including the URL(s) where the bug occurs or you'd like the new feature to go.": 'Molim navedite što više detalja, uključujući URL gdje se greška dešava ili želite nove mogućnosti.', 'Please provide the URL of the page you are referring to, a description of what you expected to happen & what actually happened.': 'Molimo unesite URL stranice na koju mislite, opis onoga što očekujete da će se desiti i onoga što se ustvari dogodilo', 'Please record Beneficiary according to the reporting needs of your project': 'Molim zapišite korisnmika prema potrebama izvještavanja vašeg projekta', 'Please report here where you are:': 'Molim Vas da ovdje prijavite gdje se nalazite:', 'Please select': 'Označite,molim', 'Please Select a Facility': 'Molim odaberite objekat', 'Please select a valid image!': 'Molim izaberite ispravnu sliku', 'Please select another level': 'Molimo odaberite drugi nivo', 'Please select exactly two records': 'Molim odaberite tačno dva zapisa', 'Please sign-up with your Cell Phone as this allows us to send you Text messages. Please include full Area code.': 'Molimo da se prijavite koristeći svoj broj mobilnog telefona, jer nam to omogućava da vam šaljemo SMS poruke. Molimo da napišete kompletan pozivni broj.', 'Please specify any problems and obstacles with the proper handling of the disease, in detail (in numbers, where appropriate). You may also add suggestions the situation could be improved.': 'Molimo detaljno specificirajte probleme i prepreke sa pravilnim pristupanjem bolesti (u brojevima, gdje je prikladno). Možete također dodati prijedloge gdje bi situacija mogla biti poboljšana.', 'Please use this field to record any additional information, including a history of the record if it is updated.': 'Molimo koristite ovo polje da popunite dodatne informacije, uključujući istorijat zapisa ako je ažuriran.', 'Please use this field to record any additional information, including any Special Needs.': 'Molimo Vas iskorisite ovo polje da snimite dodatne informacije, uključujući bilo kakve specijalne potrebe.', 'Please use this field to record any additional information, such as Ushahidi instance IDs. Include a history of the record if it is updated.': 'Molimo koristite ovo polje da zabilježite dodatne informacije, poput Ushahidi ID instance. 
Uključite istorijat zapisa ako je ažuriran.', 'Pledge': 'Obećati podršku', 'Pledge Aid': 'Ponudi pomoć', 'Pledge Aid to match these Requests': 'Ponudi pomoć koja je usaglašena s ovim zahtjevima', 'Pledge Status': 'Status ponude pomoći', 'Pledge Support': 'Obećati podršku', 'Pledges': 'Ponude za pomoć', 'PoI': 'Tačka interesa', 'PoI Type added': 'Tip tačke interesa dodan', 'PoI Type deleted': 'Tip tačke interesa obrisan', 'PoI Type Details': 'Detalji o vrsti tačke interesa', 'PoI Type updated': 'Tip tačke interesa ažuriran', 'PoI Types': 'Tipovi tačaka interesa', 'Point': 'Tačka', 'Point of Interest added': 'Tačka interesa dodana', 'Point of Interest deleted': 'Tačka interesa obrisana', 'Point of Interest Details': 'Detalji tačaka interesa', 'Point of Interest updated': 'Tačka interesa ažurirana', 'pointed': 'označeno', 'Points of Interest': 'Tačke interesa', 'PoIs': 'Tačke interesa', 'PoIs successfully imported.': 'Tačke interesa uspješno uvezene.', 'Poisoning': 'Trovanje', 'Poisonous Gas': 'Otrovni gas', 'Poland': 'Poljska', 'Police': 'Policija', 'Policies & Strategies': 'Politike ili strategije', 'Policy': 'Pravila', 'Policy or Strategy': 'Politika ili strategija', 'Policy or Strategy added': 'Politika ili strategija dodana', "Policy or Strategy added, awaiting administrator's approval": 'Dodana politika ili strategija, čeka se na potvrdu administratora.', 'Policy or Strategy deleted': 'Politika ili strategija obrisana', 'Policy or Strategy updated': 'Politika ili strategija ažurirana', 'Poll': 'Anketa', 'Pollution and other environmental': 'Zagađenja i druge okolišne', 'Polygon': 'Poligon', 'Polygon reference of the rating unit': 'Poligon referenca jedinice za procjenu', 'Poor': 'Slabo', 'Population': 'Populacija', 'Population and number of households': 'Stanovnistvo i broj domacinstava', 'Population Statistic added': 'Statistika o populaciji dodana', 'Population Statistic deleted': 'Statistika stanovništva izbrisana', 'Population Statistic Details': 'Statističke pojedinosti populacije', 'Population Statistic updated': 'Statistika stanovništva osvježena', 'Population Statistics': 'Statistika o populaciji', 'Popup Fields': 'Popup stavke', 'Popup Label': 'Iskočna oznaka', 'Porridge': 'Kaša', 'Port Closure': 'Zatvaranje luke', 'Portable App': 'Prenosiva aplikacije', 'Portuguese': 'Portugalski', 'Portuguese (Brazil)': 'Portugalski (Brazil)', 'Position': 'Pozicija', 'Position added': 'Pozicija dodana', 'Position Catalog': 'Katalog Položaja', 'Position deleted': 'Pozicija izbrisana', 'Position Details': 'Detalji pozicije', 'Position in tour': 'Položaj na turi', 'Position updated': 'Pozicija ažurirana', 'Positions': 'Položaji', 'Post': 'Ubacivi tekst', 'Post added': 'Ubacivi tekst dodan', 'Post deleted': 'Ubacivi tekst obrisan', 'Post Details': 'Detalji ubacivog teksta', 'Post removed': 'Ubacivi tekst uklonjen', 'Post set as Module/Resource homepage': 'Ubacivi tekst postavljen kao početna stranica modula/resursa', 'Post Tagged': 'Ubacivi tekst označen', 'Post updated': 'Ubacivi tekst ažuriran', 'Post-impact shelterees are there for a longer time, so need more space to Sleep.': 'Skloništa poslije nesrećnog utjecaja su ovdje za duže vrijeme, pa je potrebno više prostora za spavanje.', 'Postcode': 'Poštanski broj', 'Posted on': 'Postavljeno', 'postponed': 'odgođeno', 'Posts': 'Ubacivi tekstovi', 'Poultry': 'Perad', 'Poultry restocking, Rank': 'Obnova zaliha peradi, Rang', 'Pounds': 'Funte', 'Power': 'Napajanje', 'Power Failure': 'Nestanak struje', 'Power Outage': 'Nestanak napajanja', 'Power 
Supply Type': 'Vrsta napajanja', 'Powered by Sahana Eden': 'Omogućeno od strane Sahana Eden', 'Pre-cast connections': 'Konekcije prije emitovanja', 'Preferred Name': 'Preferirano ime', 'Pregnant women': 'Trudnica', 'Preliminary': 'Preliminarno', 'preliminary template or draft, not actionable in its current form': 'preliminarni šablon ili nacrt, nije kažnjiv u trenutnom obliku', 'Prepare Shipment': 'Pripremi pošiljku', 'Presence': 'Prisustvo', 'Presence Condition': 'stanje prisutnosti', 'Presence Log': 'Zapisnik prisustva', 'Previous': 'Prethodni', 'previous 100 rows': 'prethodnih 100 redova', 'Previous View': 'Prethodni prikaz', 'primary incident': 'primarni incident', 'Primary Name': 'Primarni naziv', 'Primary Occupancy': 'Primarno zanimanje', 'Principal': 'Najvažnije', 'Print': 'Štampa', 'Priority': 'Prioritet', 'Priority from 1 to 9. 1 is most preferred.': 'Prioritet od 1 do 9. 1 je najviše željen.', 'Priority Level': 'Nivo prioriteta', 'Privacy': 'Privatnost', 'Private': 'Privatno', 'Problem added': 'Problem dodan', 'Problem Administration': 'Upravljanje problemima', 'Problem connecting to twitter.com - please refresh': 'Problem konektovanja na twitter.com - molimo osvježite stranicu', 'problem connecting to twitter.com - please refresh': 'Problem konektovanja na twitter.com - molimo osvježite stranicu', 'Problem deleted': 'Izbrisan problem', 'Problem Details': 'Detalji problema', 'Problem Group': 'Grupa problema', 'Problem Title': 'Naslov problema', 'Problem updated': 'Problem ažuriran', 'Problems': 'Problemi', 'Problems? Please call': 'Problemi? Molim pozovite', 'Procedure': 'Procedura', 'Process Received Shipment': 'Obradi primljenu isporuku', 'Process Shipment to Send': 'Procesiraj pošiljku za slanje', 'Processed with KeyGraph?': 'Obrađeno koristeći KeyGraph?', 'Processing': 'Obrada', 'Procured': 'Nabavljeno', 'Product Description': 'Opis proizvoda', 'Profession': 'Profesija', 'Professional Experience': 'Profesionalno iskustvo', 'Professional Experience added': 'Profesionalno iskustvo dodano', 'Professional Experience deleted': 'Profesionalno iskustvo obrisano', 'Professional Experience Details': 'Detalji profesionalnog iskustva', 'Professional Experience updated': 'Profesionalno iskustvo ažurirano', 'Profile': 'Profil', 'Profile Configuration': 'Konfiguracija Profila', 'Profile Configuration removed': 'Konfiguracija profila izbrisana', 'Profile Configuration updated': 'Ažurirana konfiguracija profila', 'Profile Configurations': 'Konfiguracije profila', 'Profile Configured': 'Profil konfigurisan', 'Profile Details': 'Detalji profila', 'Profile Page': 'Stranica profila', 'Profile Picture': 'Slika profila', 'Profile Picture?': 'Slika profila?', 'Profiles': 'Profili', 'Program added': 'Program dodan', 'Program deleted': 'Program obrisan', 'Program Details': 'Detalji programa', 'Program Hours (Month)': 'Programski sati (mjeseci)', 'Program Hours (Year)': 'Programski sati (godina)', 'Program updated': 'Program ažuriran', 'Programs': 'Programi', 'Project': 'Projekt', 'Project Activity': 'Aktivnosti projekta', 'Project added': 'Projekat je dodan', 'Project Calendar': 'Kalendar projekta', 'Project deleted': 'Projekat je obrisan', 'Project Details': 'Detalji Projekta', 'Project Framework': 'Radni okvir projekta', 'Project has no Lat/Lon': 'Projekat nema Lat/Lon koordinate', 'Project Management': 'Upravljanje Projektom', 'Project Name': 'Ime projekta', 'Project not Found': 'Projekt nije nađen', 'Project Organization Details': 'Detalji organizacije projekta', 'Project Organization updated': 
'Organizacija projekta ažurirana', 'Project Organizations': 'Organizacije projekta', 'Project Report': 'Izvještaj projekta', 'Project Status': 'Status projekta', 'Project Time Report': 'Izvještaj o projektnom vremenu', 'Project Tracking': 'Praćenje projekata', 'Project updated': 'Projekat je ažuriran', 'Projection': 'Projekcija', 'Projection added': 'Projekcija je dodana', 'Projection deleted': 'Projekcija je obrisana', 'Projection Details': 'Detalji projekcije', 'Projection Type': 'Tip projekcije', 'Projection updated': 'Projekcija je ažurirana', 'Projections': 'Projekcije', 'Projects': 'Projekti', 'Projects Map': 'Mapa projekata', 'Prominent Adams apple': 'Uočljiva Adamova jabučica', 'pronounced': 'izgovoreno', 'Property reference in the council system': 'Preporuka vlasništva u sistemu vijeća', 'Proposed': 'Predloženo', 'Protected resource': 'Zaštićeni resurs', 'Protection': 'Zaštita', 'Protocol': 'Protokol', 'Provide a password': 'Obezbijedi lozinku', 'Provide an optional sketch of the entire building or damage points. Indicate damage points.': 'Osiguravanje dodatnih skica cijele zgrade ili oštećenih tačaka. Navesti oštečene tačke.', 'Provide Metadata for your media files': 'Obezbjedi metapodatke za vaše medijske datoteke', 'Province': 'Područje', 'Proxy Server URL': 'URL za posrednički (proxy) server', 'Proxy-server': 'Proxy server', 'Psychiatrics/Adult': 'Psihijatrija/odrasli', 'Psychiatrics/Pediatric': 'Psihijatrija/Pedijatrija', 'Pubic hair, Colour': 'Dlake na polnim organima, boja', 'Pubic hair, Extent': 'Dlake na polnim organima, dužina', 'Public': 'Javno', 'Public and private transportation': 'Javni i privatni transport', 'Public assembly': 'Javni skup', 'Public Event': 'Javni događaj', 'Published on': 'Objavljeno', 'pull': 'povuci', 'pull and push': 'povuci i gurni', 'Pull tickets from external feed': 'Povucite karticu sa spoljašnjeg snabdjevanja', 'Punjabi': 'Pandžabi', 'Purchase': 'Kupovina', 'Purchase Date': 'Datum kupovine', 'Purchase Price': 'Nabavna cijena', 'Purpose': 'Namjena', 'push': 'gurni', 'Push tickets to external system': 'Guranje kartica u vanjski sistem', 'Put a choice in the box': 'Označite izbor', 'pygraphviz library not found': 'pygraphviz biblioteka nije nađena.', 'pyramidal': 'piramidalno', 'Pyroclastic Flow': 'Piroklastični tok', 'Pyroclastic Surge': 'Vulkanski pepeo', 'pyserial module not available within the running Python - this needs installing for SMS!': 'pyserial modul nije dostupan unutar tekućeg Pythona-potrebna je instalacija za SMS!', 'Python GDAL required for Shapefile support!': 'Python GDAL potreban za podršku datotekama s likovima!', 'Python Serial module not available within the running Python - this needs installing to activate the Modem': 'Python Serial modul nije dostupan kada je Python pokrenut - ovo zahtijeva instalaciju da bi se aktivirao modem', 'Qatar': 'Katar', 'quadrangular': 'četverougaoni', 'Quantity': 'Količina', 'Quantity Committed': 'Količina učinjena', 'Quantity Fulfilled': 'Količina ispunjena', "Quantity in %s's Inventory": "Količina u %s's Inventaru", "Quantity in %s's Warehouse": 'Količina u %s skladištu', 'Quantity in Transit': 'Količina u prelazu', 'Quantity Needed': 'Potrebna količina', 'Quantity range': 'Opseg količine', 'Quantity Received': 'Primljena količina', 'Quantity Returned': 'Vraćena količina', 'Quantity Sent': 'Poslana količina', 'Quarantine': 'Karantena', 'Queries': 'Upiti', 'Query': 'Upit', 'Query added': 'Upit dodan', 'Query deleted': 'Upit obrisan', 'Query Feature': 'Upit karakteristika', 'Query updated': 
'Upit ažuriran', 'Query:': 'Upit:', 'Queryable?': 'Moguće postaviti u upit?', 'Question': 'Pitanje', 'Question Details': 'Detalji pitanja', 'Question Meta-Data': 'Metapodaci pitanja', 'Question Meta-Data added': 'Metapodaci pitanja dodani', 'Question Meta-Data deleted': 'Metapodaci pitanja obrisani', 'Question Meta-Data Details': 'Detalji metapodataka pitanja', 'Question Meta-Data updated': 'Metapodaci pitanja ažurirani', 'Question Summary': 'Rezime pitanja', 'Race': 'Rasa', 'Race group': 'Rasna grupa', 'Race, complexion': 'Rasa, miješana', 'Radio Callsign': 'Pozivni znak za radio', 'Radio Details': 'Radio detalji', 'Radiological Hazard': 'Radiološka opasnost', 'Radiology': 'Radiologija', 'Railway Accident': 'Željeznička nesreća', 'Railway Hijacking': 'Razbojništvo na željeznici', 'Rain Fall': 'Padanje kiše', 'RAM Cache Keys': 'RAM cache ključevi', 'Ram Cleared': 'Ram obrisan', 'Rapid Assessment': 'Brza procjena', 'Rapid Assessment added': 'Brza procjena dodana', 'Rapid Assessment deleted': 'Izbrisana brza procjena', 'Rapid Assessment Details': 'Detalji brze procjene', 'Rapid Assessment updated': 'Brza procjena ažurirana', 'Rapid Assessments': 'Brze Procjene', 'Rapid Assessments & Flexible Impact Assessments': 'Brze procjene i fleksibilne procjene utjecaja', 'Rapid Close Lead': 'Jaki i grupni grad', 'Rapid Data Entry': 'Brzi unos podataka', 'Rating': 'Rejting', 'Raw Database access': 'Direktni pristup bazi podataka', 'RC frame with masonry infill': 'RC okvir sa zidanim ispunjenjem', 'Read-Only': 'Samo za čitanje', 'Ready': 'Spreman', 'Real World Arbitrary Units': 'Proizvoljne jedinice iz realnog svijeta', 'Reason': 'Razlog', 'Receive': 'Preuzimanje', 'Receive %(opt_in)s updates:': 'Primite %(opt_in)s nadogradnje:', 'Receive New Shipment': 'Primi novu pošiljku', 'Receive Shipment': 'Prijem pošiljke', 'Receive this shipment?': 'Primiti ovu pošiljku?', 'Receive updates': 'Primi nadogradnje', 'Receive/Incoming': 'Prijem/dolaz', 'Received': 'Primljeno', 'Received By': 'Primljeno od strane', 'Received By Person': 'Osoba primila', 'Received date': 'Primljeno datuma', 'Received Item added': 'Dodata primljena stavka', 'Received Item deleted': 'Primljeni Predmet obrisan', 'Received Item Details': 'Detalji primljene stavke', 'Received Item updated': 'Primljena stavka je ažurirana', 'Received Shipment canceled': 'Primljena pošiljka otkazana', 'Received Shipment canceled and items removed from Inventory': 'Isporuka otkazana i stavke uklonjene iz skladišta', 'Received Shipment Details': 'Detalji primljene pošiljke', 'Received Shipment updated': 'Ažurirana primljena stavka', 'Received Shipments': 'Primljene pošiljke', 'Received/Incoming Shipments': 'Primljene/dolazne pošiljke', 'Receiving and Sending Items': 'Primanje i slanje stavki', 'Receiving Inventory': 'Prijem u skladište', 'Reception': 'Prijem', 'Recipient': 'Primalac', 'Recipient(s)': 'Primaoc(i)', 'Recipients': 'Primaoci', 'Recommendations for Repair and Reconstruction or Demolition': 'Prijedlozi za opravak i rekonstrukciju ili rušenje', 'Record': 'Zapis', 'Record %(id)s created': 'Zapis %(id)s kreiran', 'Record %(id)s updated': 'Zapis %(id)s ažuriran', 'RECORD A': 'ZAPIS A', 'Record added': 'Zapis dodan', 'Record already exists': 'Zapis već postoji', 'Record any restriction on use or entry': 'Zabilježi bilo kakva ograničenja prilikom korištenja ili pristupa', 'Record approved': 'Zapis odobren', 'RECORD B': 'ZAPIS B', 'Record could not be approved.': 'Zapis ne može biti potvrđen', 'Record could not be deleted.': 'Zapis ne može biti obrisan', 
'Record deleted': 'Zapis obrisan', 'Record Details': 'Detalji zapisa', 'record does not exist': 'zapis ne postoji', 'Record ID': 'Id zapisa', 'record id': 'Id zapisa', 'Record id': 'Id zapisa', 'Record last updated': 'Zapis je posljednji put izmijenjen', 'Record not found': 'Zapis nije nađen', 'Record not found!': 'Zapis nije pronađen', 'Record Saved': 'Zapis spašen', 'Record updated': 'Zapis ažuriran', 'Record Updates': 'Ažuriranja zapisa', 'Recording and Assigning Assets': 'Snimanje i dodjela sredstava', 'Records': 'Zapisi', 'records deleted': 'Zapis obrisan', 'Records merged successfully.': 'Slogovi uspješno spojeni', 'Recovery': 'Oporavak', 'Recovery report added': 'Dodat izvjestaj o pronalaženju', 'Recovery report deleted': 'Izvještaj o pronalaženju izbrisan', 'Recovery report updated': 'Izvještaj o pronalaženju ažuriran', 'Recovery Request': 'Zahtjev za Obnovom', 'Recovery Request added': 'Zahtjev za povrat dodan', 'Recovery Request deleted': 'Zahtjev za povrat obrisan', 'Recovery Request updated': 'Zahtjev za povrat ažuriran', 'Recovery Requests': 'Zahtjevi za povrat', 'rectangular': 'pravougaona', 'Recurring': 'Ponavljajući', 'Recurring Cost': 'Ponavljajući troškovi', 'Recurring cost': 'Ponavljajući troškovi', 'Recurring costs': 'Povratni troškovi', 'Recurring Request?': 'Ponavljajući zahtjev?', 'Red': 'Crveno', 'red': 'crvena', 'Red Cross / Red Crescent': 'Crveni križ/Crveni polumjesec', 'Redirect URL': 'Preusmjeri URL', 'Reference Document': 'Referentni dokument', 'refresh': 'osvježi', 'Refresh Rate (seconds)': 'Brzina osvježavanja (sekunde)', 'Region': 'Oblast', 'Region added': 'Područje dodano', 'Region deleted': 'Područje obrisano', 'Region Details': 'Detalji oblasti', 'Region Location': 'Lokacija regiona', 'Region updated': 'Područje ažurirano', 'Regional': 'Regionalan', 'Regions': 'Oblasti', 'Register': 'Registruj', 'Register As': 'Registruj kao', 'Register for Account': 'Registruj se za korisnički nalog', 'Register Person': 'Registriraj osobu', 'Register Person into this Camp': 'Registruj Osobu u ovaj Kamp', 'Register Person into this Shelter': 'Registruj osobu u sklonište', 'Register them as a volunteer': 'Registruj ih kao volontere', 'Registered People': 'Registrirani ljudi', 'Registered users can': 'Registrovani korisnici mogu', 'Registered users can %(login)s to access the system': 'Potrebna je %(login)s da registrovani korisnici mogu da pristupe sistemu', 'Registration': 'Registracija', 'Registration added': 'Registracija zabilježena', 'Registration Details': 'Detalji registracije', 'Registration entry deleted': 'Registracija zabilježena', 'Registration is still pending approval from Approver (%s) - please wait until confirmation received.': 'Prijava još čeka odobrenje od ovlaštene osobe (%s) - molimo pričekajte dok se prijava ne odobri.', 'Registration key': 'Ključ za registraciju', 'Registration not permitted': 'Registracija nije dozvoljena', 'Registration successful': 'Registracija uspješna', 'Registration updated': 'Registracija zabilježena', 'Registro de Organización': 'Registar organizacije', 'Registro de Refugios': 'Registar izbjeglica', 'Rehabilitation/Long Term Care': 'Rehabilitacija/ Briga u dužem trajanju', 'Reinforced masonry': 'Ojačano zidanje', 'Reject': 'Odbaci', 'Rejected': 'Odbijeno', 'Relationship': 'Odnos', 'Relative added': 'Dodat srodnik', 'Relative deleted': 'Obrisan srodnik', 'Relative Details': 'Povezani detalji', 'Relative updated': 'Srodnik ažuriran', 'Relatives': 'Srodnici', 'Reliable access to sanitation/hygiene items': 'Pouzdan pristup 
sanitarnim predmetima ', 'Relief': 'Pomoć', 'Relief Item': 'Stavka pomoći', 'Relief Item updated': 'Stavka pomoći je ažurirana', 'Relief Items': 'Predmeti pomoći', 'Relief Team': 'Tim za pomoć', 'Religion': 'Religija', 'Religious': 'Vjerski', 'Religious Leader': 'Vjerski Vođa', 'Reload': 'Ponovo učitaj', 'reload': 'ponovo učitaj', 'Relocate as instructed in the <instruction>': 'Premjesti kako je navedeno u <instruction>', 'Remarks': 'Napomene', 'Remember Me': 'Zapamti me', 'Remote Error': 'Greška udaljenog servisa.', 'Remove': 'Ukloni', 'Remove Activity from this event': 'Ukloni Aktivnost iz ovog događaja', 'Remove all log entries': 'Ukloni stavke zapisnika', 'Remove Asset from this event': 'Sklonite sredstvo sa ovog događaja', 'Remove Asset from this incident': 'Odstrani sredstvo iz ovog incidenta', 'Remove Asset from this scenario': 'Odstrani sredstvo iz ovog scenarija', 'Remove Bookmark': 'Ukloni zabilješku', 'Remove Coalition': 'Ukloni koaliciju', 'Remove Document from this request': 'Ukloni dokument iz ovog zahtjeva', 'Remove existing data before import': 'Obriši postojeće podatke prije uvoza', 'Remove Facility from this event': 'Ukloni objekat iz ovog događaja', 'Remove Facility from this incident': 'Ukloni ovaj objekt iz ovog incidenta', 'Remove Facility from this scenario': 'Ukloni objekat iz ovog scenarija', 'Remove Feature: Select the feature you wish to remove & press the delete key': 'Ukloni karakteristiku: Izaberite karakteristiku koju želite ukloniti i pritisnite dugme za brisanje', 'Remove Human Resource from this event': 'Skloniti ljudske resurse sa ovog događaja', 'Remove Human Resource from this incident': 'Uklonite ovaj ljudski resusr sa ovog incidenta', 'Remove Human Resource from this scenario': 'Uklonite ljudske resurse iz ovog scenarija', 'Remove Incident from this event': 'Ukloni ovaj incident iz datog događaja', 'Remove Incident Report from this event': 'Ukloni izvještaj o incidentu za ovaj događaj', 'Remove Incident Report from this incident': 'Odstrani izvještaj o incidentu iz ovog incidenta', 'Remove Incident Type from this event': 'Ukloni tip incidenta za ovaj događaj', 'Remove Item from Inventory': 'Ukloni stavku iz inventara', 'Remove Layer from Profile': 'Ukloni sloj s profila', 'Remove Layer from Symbology': 'Ukloni sloj s značenja simbola', 'Remove Map Profile from this event': 'Ukloni podešavanje mape iz ovog događaja', 'Remove Map Profile from this incident': 'Ukloni konfiguraciju mape s ovog incidenta', 'Remove Map Profile from this scenario': 'Ukloni podsešavanje mape iz ovog scenarija', 'Remove Network': 'Ukloni mrežu', 'Remove Organization from Project': 'Ukloni organizaciju iz projekta', 'Remove People from Commitment': 'Ukloni Osobu iz Obavezivanja', 'Remove Person from Commitment': 'Ukloni osobu iz zaduženja', 'Remove Person from Group': 'Ukloni osobu iz grupe', 'Remove Person from Team': 'Odstrani osobu iz tima', 'Remove Profile Configuration for Layer': 'Ukloni konfiguraciju profila za ovaj sloj', 'Remove selection': 'Uklonite trenutni odabir', 'Remove Skill': 'Obriši vještinu', 'Remove Skill from Request': 'Ukloni vještinu iz zahtjeva', 'Remove Stock from Warehouse': 'Ukloni zalihu iz skladišta', 'Remove Symbology from Layer': 'Ukloni značenja simbola s sloja', 'Remove Tag for this Event from this Post': 'Ukloni oznaku ovog događaja za ovaj ubacivi tekst', 'Remove Task from this event': 'Izbrišite zadatak sa ovog događaja', 'Remove Task from this incident': 'Odstrani zadatak iz ovog incidenta', 'Remove Task from this scenario': 'Ukloni zadatak 
iz ovog scenarija', 'Remove this asset from this event': 'Ukloni ovo sredstvo iz datog događaja', 'Remove this asset from this scenario': 'Ukloni sredstvo iz ovog scenarija', 'Remove this entry': 'Ukloni ovaj unos', 'Remove this facility from this event': 'Uklonite ovaj objekat iz ovog događaja', 'Remove this facility from this scenario': 'Ukloni ovaj objekt iz ovog scenaria', 'Remove this human resource from this event': 'Uklonite ovaj ljudski resusr sa ovog dešavanja', 'Remove this human resource from this scenario': 'Izbriši ovaj ljudski resurs sa ovog scenarija', 'Remove this task from this event': 'Ukloni ovaj zadatak sa ovog događaja', 'Remove this task from this scenario': 'Ukloni ovaj zadatak sa scenarija', 'Remove Vehicle from this incident': 'Odstrani vozilo iz ovog incidenta', 'Removed from Group': 'Odstranjen iz grupe', 'Removed from Team': 'Odstranjen iz tima', 'Reopened': 'Ponovo otvoren', 'Repacked By': 'Prepakovao', 'Repair': 'Popravi', 'Repaired': 'Popravljeno', 'Repairs': 'Popravke', 'Repeat': 'Ponovi', 'Repeat your password': 'Ponovite vašu lozinku', 'replace': 'zamijeni', 'Replace': 'Zamijeni', 'Replace All': 'Zamijeni sve', 'Replace if Master': 'Zamjeni ukoliko je Master', 'Replace if Newer': 'Zamijeni ako je novije', 'Replace with Remote': 'Zamijeni s udaljenim', 'Replace/Master': 'Zamjeni/Master', 'Replace/Newer': 'Zamijeni/novije', 'Replies': 'Odgovori', 'Reply': 'Odgovor', 'Reply Message': 'Poruka za automatski odgovor', 'Report': 'Izvještaj', 'Report a Found Person': 'Prijavi pronalazak osobe', 'Report a Missing Person': 'Prijavite nestanak osobe', 'Report a Problem with the Software': 'Prijavi problem sa softverom', 'Report added': 'Dodan izvještaj', 'Report Another Assessment...': 'Prijavite još jednu procjenu...', 'Report by Age/Gender': 'Izvještaj po starosti/spolu', 'Report deleted': 'Obrisan izvještaj', 'Report Details': 'Detalji izvještaja', 'Report my location': 'Prijavi moju lokaciju', 'Report of': 'Izvještaj za', 'Report on Annual Budgets': 'Izvještaj o godišnjem budžetu', 'Report Options': 'Opcije izvještaja', 'Report Resource': 'Prijavi resurs', 'Report that person missing': 'Prijavite nestanak osobe', 'Report the contributing factors for the current EMS status.': 'Izvjesititi o faktorima koji utiču na trenutni status hitne medicinske pomoći', 'Report the contributing factors for the current OR status.': 'Prijavi faktore koji doprinose trenutnom OR statusu.', 'Report them as found': 'Prijavi ih kao pronađene', 'Report them missing': 'Prijavite njihov nestanak', 'Report To': 'Prijavi na', 'Report Types Include': 'Tipovi izvještaja sadrže', 'Report updated': 'Ažuriran izvještaj', 'Reported By': 'Prijavio%2', 'Reported By (Not Staff)': 'Izvijestio (Nije osoblje)', 'Reported By (Staff)': 'Prijavio (osoblje)', 'Reported To': 'Prijavljeno', 'Reporter': 'Izvjestilac', 'Reporter Name': 'Ime izvjestioca', 'Reporter:': 'Izvjestilac:', 'Reporting on the projects in the region': 'Izvještavanje o projektima u regionu', 'ReportLab module not available within the running Python - this needs installing for PDF output!': 'ReportLab modul nije dostupan unutar pokrenutog Pythona-potrebna je instalacija PDF izlaza!', 'ReportLab module not available within the running Python - this needs installing to do PDF Reporting!': 'Modul ReportLab nije dostupan unutar pokrenutog Python-a, ovo zahtijeva instalaciju PDF izlaza!', 'ReportLab not installed': 'ReportLab nije instaliran', 'Reports': 'Izvještaji', 'reports successfully imported.': 'Izvještaji uspješno uvezeni', 
'Repositories': 'Repozitoriji', 'Repository': 'Repozitorijum', 'Repository Base URL': 'Osnovni repozitorijski URL...', 'Repository Configuration': 'Konfiguracija repozitorija', 'Repository configuration deleted': 'Obrisana konfiguracija repozitorija', 'Repository configuration updated': 'Ažurirana konfiguracija repozitorija', 'Repository configured': 'Repozitorij konfigurisan', 'Repository Name': 'Ime repozitorija', 'Repository Type': 'Tip repozitorija', 'Repository UUID': 'UUID repozitorija', 'representation of the Polygon/Line.': 'predstavljanje Poligona/Linije', 'Request': 'Zahtjev', 'Request Added': 'Dodan Zahtjev', 'Request added': 'Zahtjev dodan', 'Request Aid': 'Zahtijevaj pomoć', 'Request Canceled': 'Zahtjev otkazan', 'Request deleted': 'Zahtjev obrisan', 'Request Detail': 'Zahtijevaj detalj', 'Request Details': 'Zatražiti detalje', 'Request for Role Upgrade': 'Zahtjev za nadogradnju uloge', 'Request From': 'Zahtjev od', 'Request from Facility': 'Zahtjev s objekta', 'Request Item': 'Zahtjev za predmetom', 'Request Item added': 'Stavka zahtjeva dodana', 'Request Item deleted': 'Traženi artikal obrisan.', 'Request Item Details': 'Zatraži detalje o predmetu', 'Request Item from Available Inventory': 'Zahtjevaj stavku iz dostupnog inventara', 'Request Item updated': 'Zahtijevana stavka je ažurirana', 'Request Items': 'Zatraži stavke', 'Request Job': 'Zahtijevaj posao', 'Request Management': 'Upravljanje zahtjevima', 'Request New People': 'Zatraži nove ljude', 'Request Schedule': 'Raspored zahtjeva', 'Request Status': 'Zahtjev za status', 'Request Status updated': 'Status zahtjeva ažuriran', 'Request Stock from Available Warehouse': 'Zahtijevaj zalihu iz dostupnih skladišta', 'Request Template Added': 'Predložak zahtjeva dodan', 'Request Template Deleted': 'Predložak zahtjeva obrisan', 'Request Template Details': 'Detalji predloška zahtjeva', 'Request Template Updated': 'Predložak zahtjeva ažuriran', 'Request Templates': 'Predlošci zahtjeva', 'Request Type': 'Tip zahtjeva', 'Request Updated': 'Zahtjev ažuriran', 'Request updated': 'Zahtjev ažuriran', 'Request, Response & Session': 'Zahtjev, odgovor i sesija', 'Requested': 'Zahtijevano', 'Requested By': 'Zatraženo od strane', 'Requested by': 'Zatraženo od strane', 'Requested By Facility': 'Zahtijevano po objektima', 'Requested By Location': 'Zahtjevano po lokacijama', 'Requested By Warehouse': 'Zahtjevano po skladištu', 'Requested For': 'Zahtijevano za', 'Requested For Facility': 'Zahtjevano po objektima', 'Requested for Site': 'Zahtjevano po mjestu', 'Requested For Site': 'Zahtjevano po mjestu', 'Requested From': 'Traženo Od', 'Requested From Warehouse': 'Zahtjevano iz skladišta', 'Requested Items': 'Zatraženi predmeti', 'Requested on': 'Zahtijevano na', 'Requested Skill': 'Zahtijevana vještina', 'Requested Skill Details': 'Detalji o traženoj vještini', 'Requested Skill updated': 'Zahtijevana vještina ažurirana', 'Requested Skills': 'Tražene vještine', 'Requester': 'Zahtjevaoc', 'Requests': 'Zahtjevi', 'Requests Management': 'Upravljanje zahtjevima', 'Requests Report': 'Izvještaj o zahtjevima', 'Required by other servers.': 'Zahtijevano od strane drugog servera', 'Required Skill': 'Potrebne vjestine', 'Required Skills': 'Potrebne vještine', 'Required Skills (optional)': 'Potrebne vještine (opciono)', 'Requires login': 'Potrebna prijava', 'Requires Login': 'Potrebna prijava', 'Requires Login!': 'Potrebna prijava!', 'Rescue Ambulance': 'Spasilačka kola hitne pompoći', 'Rescue and recovery': 'Spašavanje i oporavak', 'Rescue Vehicle 
Tactical Assistance': 'Taktička pomoć vozila za spašavanje', 'Reset': 'Ponovno postavljanje', 'Reset form': 'Vradi formular na početak', 'Reset Password': 'Promijeni lozinku', 'Resize Feature: Select the feature you wish to resize & then Drag the associated dot to your desired size': 'Raširenje karakteristike: Izaberite karakteristiku kojoj želite promijeniti veličinu i prevucite pridruženu tačku željenoj veličini', 'Resolve': 'Razriješi', 'Resolve Conflict': 'Razriješi konflikt', 'Resolve link brings up a new screen which helps to resolve these duplicate records and update the database.': 'Link za Riješi prikazuje novi ekran koji pomaže da se riješi problem sa duplim zapisima i ažurira baza podataka', 'Resolved': 'Riješeno', 'Resource': 'Resurs', 'Resource added': 'Resurs je dodan.', 'Resource Configuration': 'Podešavanje resursa', 'Resource configuration deleted': 'Obrisana konfiguracija resursa', 'Resource configuration updated': 'Ažurirana konfiguracija resursa', 'Resource configured': 'Resurs konfigurisan', 'Resource deleted': 'Resurs je obrisan.', 'Resource Details': 'Detalji o resursima', 'Resource Inventory': 'Zalihe resursa', 'Resource Name': 'Ime resursa', 'Resource Type': 'Tip resursa', 'Resource Type added': 'Vrsta resursa dodana', 'Resource Type deleted': 'Vrsta resursa obrisana', 'Resource Type Details': 'Detalji tipa resursa', 'Resource Type updated': 'Vrsta resursa ažurirana', 'Resource Types': 'Tipovi resursa', 'Resource updated': 'Resurs je ažuriran.', 'Resources': 'Resursi', 'Respiratory Infections': 'Infekcije respiratornih puteva', 'Responded': 'Odgovoreno', 'Responder(s)': 'Odgovorili', 'Responding': 'Odgovara', 'Response': 'Odgovor', 'RESPONSE': 'ODGOVOR', 'Response deleted': 'Odgovor izbrisan', 'Response Details': 'Detalji o odgovoru', 'Response Summaries': 'Sumarni odgovori', 'Response Summary Added': 'Sumarni odgovor dodan', 'Response Summary Deleted': 'Sumarni odgovori obrisan', 'Response Summary Details': 'Detalji sumarnog odgovora', 'Response Summary Report': 'Izvještaj sumarnog odgovora', 'Response Summary Updated': 'Sumarni odgovor ažuriran', 'Responses': 'Odgovori', 'Restricted Access': 'Ograničen pristup', 'Restricted Use': 'Ograničena upotreba', 'Restrictions': 'Ograničenja', 'Results': 'Rezultati', 'Retail Crime': 'Kriminal u maloprodaji', 'retired': 'penzionisan', 'Retrieve Password': 'Preuzeti Lozinku', 'retry': 'pokušaj ponovo', 'Return': 'Vraćanje', 'Return to Request': 'Povratak na zahtjev', 'Returned': 'Vraćeno', 'Returned From': 'Vraćeno sa', 'Returning': 'Vraćanje', 'Revert Entry': 'Vrati unos', 'Review': 'Pregled', 'Review Incoming Shipment to Receive': 'pregled nadolazeće pošiljke', 'Review the situation on maps.': 'Pogledaj situaciju na mapi.', 'Revised Quantity': 'Količina revidirana', 'Revised Status': 'Revidirani status', 'Revised Value per Pack': 'Revidirana vrijednost po paketu', 'RFA Priorities': 'RFA Prioriteti', 'RFA1: Governance-Organisational, Institutional, Policy and Decision Making Framework': 'RFA1: Radni okvir za vladu i organizacije, institucije, politiku i donošenje odluka', 'RFA2: Knowledge, Information, Public Awareness and Education': 'RFA2: Znanje, informacije, javno obavještavanje i obrazovanje', 'RFA3: Analysis and Evaluation of Hazards, Vulnerabilities and Elements at Risk': 'RFA3: Analiza i procjena rizika, ranjivosti i elemenata rizika', 'RFA4: Planning for Effective Preparedness, Response and Recovery': 'RFA4: Planiranje za efektivnu pripremljenost, odgovor i obnovu', 'RFA5: Effective, Integrated and People-Focused 
Early Warning Systems': 'RFA5: Efektivni, integrisani i fokusirani na ljudstvo sistemi za rano upozoravanje', 'RFA6: Reduction of Underlying Risk Factors': 'RFA6: Smanjenje fundamentalnih faktora rizika', 'Rice': 'Riža', 'Rich Text?': 'Bogat tekst?', 'right': 'desno', 'Right-to-Left': 'Sa desna na lijevo', 'Riot': 'pobuna', 'Risk': 'Rizik', 'Risk transfer': 'Prijenos rizika', 'river': 'rijeka', 'River': 'Rijeka', 'River added': 'Dodana rijeka', 'River deleted': 'Rijeka izbrisana', 'River Details': 'Detalji o rijeci', 'River updated': 'Rijeka ažurirana', 'Rivers': 'Rijeke', 'Road Accident': 'Saobraćajna nesreća', 'Road Closed': 'Zatvorena cesta', 'Road Conditions': 'Stanje putnih pravaca', 'Road Delay': 'Odgađanje puta', 'Road Hijacking': 'Razbojništvo na putu', 'Road Usage Condition': 'Stanje na cesti', 'Roads Layer': 'Sloj puteva', 'Role': 'Uloga', 'Role added': 'Uloga dodana', 'Role assigned to User': 'Dodijeljena uloga korisniku', 'Role deleted': 'Uloga obrisana', 'Role Details': 'Detalji uloga', 'Role Name': 'Ime uloge', 'Role Required': 'Potrebna uloga', 'Role updated': 'Uloga ažurirana', 'Role Updated': 'Uloga izmjenjena', 'Role-based': 'baziran na ulozi', 'Roles': 'Uloge', 'Roles currently assigned': 'Trenutno dodijeljene uloge', 'Roles of User': 'Uloge korisnika', 'Roles Permitted': 'Dopuštene uloge', 'Roles updated': 'Uloge ažurirane', 'Roll On Roll Off Berth': 'Ro-ro brod', 'Roman': 'rimski', 'Romania': 'Rumunija', 'Roof tile': 'Crijep', 'Roofs, floors (vertical load)': 'Krovovi, podovi (vertikalno opterećenje)', 'Room': 'Soba', 'Room added': 'Dodana soba', 'Room deleted': 'Obrisana soba', 'Room Details': 'Detalji sobe', 'Room updated': 'Ažurirana soba', 'Rooms': 'Sobe', 'Rotate Feature: Select the feature you wish to rotate & then Drag the associated dot to rotate to your desired location': 'Rotiranje karakteristike: Izaberite karakteristiku koju želite rotirati i prevucite pridruženu tačku da rotirate na željenoj lokaciji', 'round': 'krug', 'Row Choices (One Per Line)': 'Red odgovora (Jedan po liniji)', 'Rows in table': 'Redova u tabeli', 'Rows in Table': 'Redova u tabeli', 'Rows selected': 'Izabrani redovi', 'RPC Service URL': 'URL RPC usluge', 'RSS Feed': 'RSS dovod', 'RSS Post deleted': 'RSS Ubacivi tekst obrisan', 'RSS Post Details': 'Detalji RSS ubacivog teksta', 'RSS Posts': 'RSS Ubacivi tekst', 'RSS Setting deleted': 'RSS podešavanje obrisano', 'RSS Setting Details': 'Detalji RSS postavki', 'RSS Settings': 'RSS Postavke', 'RSS settings updated': 'RSS podešavanje ažurirano', 'Run every': 'Pokreni svakih', 'Run Functional Tests': 'Pokreni funkcionalne testove', 'Run Interval': 'Interval izvrštavanja', 'Run Now': 'Pokreni sada', 'Running Cost': 'Trenutni troškovi', 'Rural Tank Tactical Vehicle': 'Seoska taktička pokretna cisterna', 'Russia': 'Rusija', 'Russian': 'ruski', 'Rwanda': 'Ruanda', 'Rápido Evaluaciones': 'Brze procjene', 'sack 20kg': 'vreća 20kg', 'sack 50kg': 'vreća 50kg', 'Safe environment for vulnerable groups': 'Sigurno okruženje za ugrožene grupe', 'Safety Assessment Form': 'Obrazac procjene sigurnosti', 'Safety of children and women affected by disaster': 'Sigurnost djece i žena ugroženih zbog katastrofe', 'Safety of children and women affected by disaster?': 'Sigurnost djece i žena ugroženih zbog prirodne nepogode?', 'Sahana access granted': 'Sahana pristup odobren', 'Sahana Community Chat': 'Chat Sahana udruženja', 'Sahana Eden <= Other sync (Sahana Agasti, Ushahidi, etc.)': 'Sahana Eden <= ostalo sinhronizacija (Sahana Agasti, Ushahidi, itd.)', 'Sahana 
Eden <=> Other': 'Sahana Eden <=> Drugi', 'Sahana Eden <=> Other (Sahana Agasti, Ushahidi, etc.)': 'Sahana Eden <=> Ostalo (Sahana Agasti, Ushahidi, itd.)', 'Sahana Eden <=> Other sync (Sahana Agasti, Ushahidi, etc.)': 'Sahana Eden <=> ostalo sinhronizacija (Sahana Agasti, Ushahidi, itd.)', 'Sahana Eden <=> Sahana Eden': 'Sahana Eden <=> Sahana Eden', 'Sahana Eden <=> Sahana Eden sync': 'Sahana Eden <=> Sahana Eden sinhronizacija', 'Sahana Eden Disaster Management Platform': 'Sahana Eden Platforma za vođenje aktivnosti u slučaju katastrofa', 'Sahana Eden Humanitarian Management Platform': 'Sahana Eden Platforma za vođenje humanitarnih aktivnosti', 'Sahana Eden portable application generator': 'Generator Sahana Eden prenosive aplikacije', 'Sahana Eden Website': 'Sahana Eden Web stranica', 'Sahana FOSS Disaster Management System': 'Sahana FOSS, sistem za upravljanje u slučaju katastrofa', 'Sahana Green': 'Sahana Green', 'Sahana is a collection of web based disaster management applications that provides solutions to large-scale humanitarian coordination and collaboration in disaster situation and its aftermath. Sahana consists of several modules for following functionalities': 'Sahana je kolekcija web baziranih aplikacija za upravljanje u slučaju katastrofe koja pruža rješenje za humanitarne koordinacije i saradnju većeg obima', 'Sahana Steel': 'Sahana čelik', 'Sahana Website': 'Sahana Web stranica', 'Saint Lucia': 'Sveta Lucija', 'Saint Vincent and the Grenadines': 'Sent Vincent i Grenadin', 'Sale': 'Prodaja', 'Salted Fish': 'Zasoljena riba', 'Salvage material usable from destroyed houses': 'Preostali materijal upotrebljiv iz uništenih kuća', 'Salvage material usable from destroyed schools': 'Preostali materijal upotrebljiv iz uništenih škola', 'Sanitation': 'Sanitacija', 'Sanitation problems': 'Sanitarni problemi', 'Satellite': 'Satelit', 'Satellite Layer': 'Satelitski sloj', 'Satellite Office': 'Pridruženi ured', 'Saturday': 'Subota', 'Saudi Arabia': 'Saudijska Arabija', 'Save': 'Snimi', 'Save and add Items': 'Snimi i dodaj stavke', 'Save and add People': 'Snimi i dodaj ljude', 'Save and Continue Editing': 'Snimi i nastavi uređivanje', 'Save any Changes in the one you wish to keep': 'Snimi promjene među onim koje želite zadržati', 'Save as New Map?': 'Snimi kao novu mapu?', 'Save Changes': 'Snimi promjene', 'Save Map': 'Snimi mapu', 'Save model as...': 'Snimi model kao...', 'Save: Default Lat, Lon & Zoom for the Viewport': 'Snimi: Podrazumijevane geografska širina i dužina i promjena veličine za prostor pogleda', 'Saved': 'Snimljeno', 'Saved filters': 'Snimljeni filteri', 'Saved Filters': 'Snimljeni filteri', 'Saved Filters...': 'Snimljeni filteri...', 'Saved Maps': 'Snimljene mape', 'Saved search added': 'Snimljeno traženje dodano', 'Saved search deleted': 'Snimljeno traženje obrisano', 'Saved search details': 'Snimljeni detalji pretrage', 'Saved search updated': 'Snimljeno traženje ažurirano', 'Saved Searches': 'Snimljene pretrage', 'Saved searches': 'Snimljene pretrage', 'Saved.': 'Spašeno.', 'Saving...': 'Snimam...', 'Scale of Results': 'Skala rezultata', 'Scanned Copy': 'Skenirana kopija', 'Scanned Forms Upload': 'Slanje skeniranih obrazaca', 'Scenario': 'Scenario', 'Scenario added': 'Scenarij dodat', 'Scenario deleted': 'Scenarij izbrisan', 'Scenario Details': 'Detalji scenarija', 'Scenario updated': 'Ažuriran scenario', 'Scenarios': 'Scenariji', 'Schedule': 'Raspored', 'Schedule synchronization jobs': 'Rasporedi poslove sinhronizacije', 'Scheduled Jobs': 'Zakazani poslovi', 
'Schema': 'Shema', 'School': 'Škola', 'School activities': 'Školske aktivnosti', 'School assistance': 'Pomoć školi', 'School assistance received/expected': 'Nastavna pomoć primljena/očekivana', 'School attendance': 'Školsko prisustvo', 'School Closure': 'Zatvaranje škole', 'School Code': 'Šifra škole', 'School destroyed': 'Škola uništena', 'School District': 'Rejon škole', 'School District added': 'Dodat školski rejon', 'School District deleted': 'Školski rejon obrisan', 'School District Details': 'Detalji školskog rejona', 'School District updated': 'Ažuriran školski rejon', 'School Districts': 'Školski rejoni', 'School heavily damaged': 'Škola teško oštećena', 'School Holidays only': 'Samo školski praznici', 'School Lockdown': 'Škola je zaključana', 'School Report added': 'Dodat školski izvjestaj', 'School Report deleted': 'Izvještaj o školii izbrisan', 'School Report Details': 'Detalji izvještaja o školama', 'School Report updated': 'Izvještaj o školama ažuriran', 'School Reports': 'Školski izvještaj', 'School Safety': 'Sigurnost škole', 'School Teacher': 'Školski nastavnik', 'School tents received': 'Primljeni školski šatori', 'School tents, source': 'Školski šatori, izvor', 'School used for other purpose': 'Škola koja se koristi u druge svrhe', 'School/studying': 'Škola/učenje', 'Schools': 'Škole', 'Scubadiving Support Vehicle': 'Vozilo za ronioce', 'Seaport': 'Luka', 'Seaport added': 'Luka dodana', 'Seaport deleted': 'Luka obrisana', 'Seaport Details': 'Detalji luke', 'Seaport updated': 'Luka ažurirana', 'Seaports': 'Luka', 'Search': 'Potraži', 'Search & List Bins': 'Pretraga i prikaz korpi', 'Search & List Catalog': 'Pretraga i prikaz kataloga', 'Search & List Items': 'Traži i izlistaj stavke', 'Search & List Site': 'Pretraga i prikaz mjesta', 'Search & List Sub-Category': 'Traži i prikaži podkategoriju', 'Search Activities': 'Pretraži aktivnosti', 'Search Activity Report': 'Pretraga izvještaja o aktivnostima', 'Search Addresses': 'Adrese Pretrage', 'Search After Save?': 'Traži nakon snimanja', 'Search Aid Requests': 'Traži zahtjeve za pomoć', 'Search All Requested Items': 'Traži sve potrebne stavke', 'Search All Requested Skills': 'Traži sve zahtjevane vještine', 'Search Alternative Items': 'Traži alternativne predmete', 'Search and Edit Group': 'Pretraži i uredi grupu', 'Search and Edit Individual': 'Pretraži i Uredi Pojedinca', 'Search Assessment Summaries': 'Sumarno traženje sredstava', 'Search Assessments': 'Pretraži procjene', 'Search Asset Assignments': 'Traži dodjelu sredstava', 'Search Asset Log': 'Traži zapisnika o sredstvima', 'Search Assets': 'Traži sredstva', 'Search Baseline Type': 'Pretraga tipa referentne tačke', 'Search Baselines': 'Pretraži referentne tačke', 'Search Brands': 'Traži proizvođačke marke', 'Search Budgets': 'Pretraži budžete', 'Search Bundles': 'Pretraži pakete', 'Search by ID Tag': 'Pretraži po ID oznaci', 'Search by Skill Types': 'Pretraži po vrstama vještina', 'Search by skills': 'Traži po vještinama', 'Search by Skills': 'Traži po vještinama', 'Search Camp Services': 'Pretraži usluge kampa', 'Search Camp Types': 'Traži vrste kampova', 'Search Camps': 'Pretraži kampove', 'Search Catalog Items': 'Pretraži stavke kataloga', 'Search Catalogs': 'Pretraži kataloge', 'Search Category<>Sub-Category<>Catalog Relation': 'Traži kategorija<>Podkategorija<>kataloški odnos', 'Search Certificates': 'Traži certifikate', 'Search Certifications': 'Pretraga certifikata', 'Search Checklists': 'Pretraži kontrolnu listu', 'Search Cluster Subsectors': 'Podsektori za 
pretragu skupa', 'Search Clusters': 'Pretraži skupove', 'Search Commitment Items': 'Pretraži stavke zaduženja', 'Search Commitments': 'Traži zaduženja', 'Search Committed People': 'Pretraži zadužene ljude', 'Search Competencies': 'Pretraga stručnosti', 'Search Competency Ratings': 'Pretraži rejtinge kompetencija', 'Search Configs': 'Traži konfiguracije', 'Search Contact Information': 'Pretraga informacija o kontaktu', 'Search Contacts': 'Pretraži kontakte', 'Search Course Certicates': 'Pretraga certifikata kursa', 'Search Courses': 'Pretraga kurseva', 'Search Credentials': 'Traži akreditive', 'Search Criteria': 'Kriterij pretrage', 'Search Distribution Items': 'Pretraži stavke raspodjele', 'Search Distributions': 'Traži raspodjele', 'Search Documents': 'Pretraži dokumente', 'Search Donors': 'Traženje donatora', 'Search Email InBox': 'Traži E-mail dolazne poruke', 'Search Email OutBox': 'Traži E-mail odlazne poruke', 'Search Entries': 'Traži stavke', 'Search Events': 'Pretraži događaje', 'Search Facilities': 'Pretraga Objekata', 'Search Feature Class': 'Traži klasu karakteristika', 'Search Feature Groups': 'Traži grupe karakteristika', 'Search Feature Layers': 'Pretraga slojeva s karakteristikama', 'Search Find Report': 'Traži izvještaj za traženje', 'Search Flood Reports': 'Pretraga izvještaja o poplavi', 'Search for a commitment by Committer name, Request ID, Site or Organization.': 'Traži zaduženje po imenu zadužioca, ID zahtjeva, mjestu ili organizaciji', 'Search for a Hospital': 'Traži bolnicu', 'Search for a Location': 'Traži lokaciju', 'Search for a Location by name, including local names.': 'Traži Lokaciju pomoću imena, uključujući lokalne nazive.', 'Search for a Person': 'Potraga za osobom', 'Search for a Project': 'Pretraživanje projekta', 'Search for a Project by name, code, location, or description.': 'Traži projekt po imenu, šifri, lokaciji ili opisu', 'Search for a Project by name, code, or description.': 'Traži projekt po imenu, šifri ili opisu', 'Search for a Project Community by name.': 'Traži zajednicu projekta po imenu', 'Search for a Request': 'Pretraži zahtjev', 'Search for a request by Site name, Requester name or free text.': 'Traži zahtjev po imenu mjesta, zahtjevaocu ili slobodnom tekstu.', 'Search for a shipment by looking for text in any field.': 'Pretraži isporuke traženjem teksta iz bilo kog polja.', 'Search for a shipment received between these dates': 'Pretraži pošiljku primljenu između ovih datuma', 'Search for a shipment sent between these dates.': 'Pretraži pošiljku poslanu između ovih datuma', 'Search for a shipment which has an estimated delivery between these dates.': 'Traži pošiljke čija se isporuka očekuje između ovih datuma.', 'Search for a vehicle by text.': 'Pretraži vozilo po tekstu', 'Search for an asset by text.': 'Pronađi sredstva uz pomoć teksta', 'Search for an item by category.': 'Tražite predmet po kategoriji', 'Search for an item by brand.': 'Pretraga predmeta po marki', 'Search for an item by catalog.': 'Kataloška pretraga stavki', 'Search for an item by category.': 'Pretraga po kategoriji', 'Search for an item by its code, name, model and/or comment.': 'Pretraga predmeta po kodu, imenu, modelu i/ili komentaru', 'Search for an item by text.': 'Potraži stavku unosom teksta', 'Search for an item by Year of Manufacture.': 'Pretraga stavki po atributu: Godina proizvodnje', 'Search for an order by looking for text in any field.': 'Pretraži narudžbu traženjem teksta iz bilo kog polja.', 'Search for an order expected between these dates': 'Pretraži 
narudžbu očekivanu između ovih datuma', 'Search for an Organization by name or acronym': 'Potraži organizaciju po imenu ili akronimu', 'Search for an Organization by name or acronym.': 'Traži organizaciju po imenu ili akronimu', 'Search for asset by country.': 'Pretraga sredstava po državama', 'Search for asset by location.': 'Traženje sredstava po lokaciji.', 'Search for commitments available between these dates.': 'Traži obaveze dostupne između ovih datuma', 'Search for commitments made between these dates.': 'Traži obaveze načinjene između ovih datuma', 'Search for Items': 'Traži stavke', 'Search for items by donating organization.': 'Traži stavke po donatorskoj organizaciji', 'Search for items by owning organization.': 'Traži stavke po vlasničkoj organizaciji', 'Search for items with this text in the name.': 'Traži stavke s tim tekstom u imenu', 'Search for office by country.': 'Traži ured po zemlji', 'Search for office by location.': 'Pretraga ureda po lokaciji', 'Search for office by organization or branch.': 'Traži kancelariju po organizaciji ili ogranku.', 'Search for office by organization.': 'Pretraži kancelarije po organizaciji', 'Search for office by text.': 'Pretraga ureda po tekstu', 'Search for Persons': 'Traži po osobama', 'Search for requests made between these dates.': 'Traži zahtjeve napravljene između ovih datuma', 'Search for requests required between these dates.': 'Traži zahtjeve potrebne između ovih datuma', 'Search for Staff or Volunteers': 'Potraži osoblje ili volontere', 'Search for vehicle by location.': 'Traži vozilo po lokaciji', 'Search for warehouse by country.': 'Pretraga skladišta na osnovu atributa: Država', 'Search for warehouse by location.': 'Traži skladišta po lokaciji', 'Search for warehouse by organization.': 'Potraži skladište po organizaciji.', 'Search for warehouse by text.': 'Potraži skladište unosom teksta', 'Search GPS data': 'Pretraži GPS podatke', 'Search Groups': 'Traži grupe', 'Search here for a person record in order to:': 'Pretraži ovdje za zapis osobe koje su zabilježene da bi :', "Search here for a person's record in order to:": 'Pretraži ovdje za lične zapise da bi :', 'Search Homes': 'Pretraži domaćinstva', 'Search Hospitals': 'Traži bolnice', 'Search Human Resources': 'Pretraži ljudske resurse', 'Search Identity': 'Pretraži identitet', 'Search Images': 'Pretražuj slike', 'Search Impact Type': 'Traži tip utjecaja', 'Search Impacts': 'Pretraga utjecaja', 'Search Import Files': 'Pretražite uvezene datoteke', 'Search Incident Reports': 'Pretraži izvještaje o incidentima', 'Search Incidents': 'Traži incidente', 'Search Inventory Items': 'Pretraži artikle sa popisa', 'Search Inventory items': 'Pretraži predmete inventara', 'Search Inventory Stores': 'Pretraži skladišta inventara', 'Search Item Catalog(s)': 'Pretraži katalog(e) stavki', 'Search Item Categories': 'Pretraži kategorije stavki', 'Search Item Packets': 'Pretraga paketa stavki', 'Search Item Packs': 'Pretraga paketa predmeta', 'Search Item Sub-Category(s)': 'Traži podkategoriju stavke', 'Search Items': 'Pretraži stavke', 'Search Job Roles': 'Pretraži radna mjesta', 'Search Keys': 'Ključevi pretrage', 'Search Kits': 'Traži komplete', 'Search Layers': 'Pretraga slojeva', 'Search Level': 'Nivo pretrage', 'Search Level 1 Assessments': 'Traži procjene prvog nivoa', 'Search Level 2 Assessments': 'Pretraživanje procjena level 2', 'Search location in Geonames': 'Navedite lokaciju u Geonames', 'Search Locations': 'Traži lokacije', 'Search Log Entry': 'pretraži unose iz zapisnika', 
'Search Map Profiles': 'Pretraži konfiguracije mape', 'Search Markers': 'Pretraži oznake', 'Search Member': 'Potraži člana', 'Search Members': 'Traži članove', 'Search Membership': 'Pretraži članstvo', 'Search Memberships': 'Pretraži članstva', 'Search messages': 'Pretraži poruke', 'Search Metadata': 'Traži metapodatke', 'Search Missions': 'Pretraži misije', 'Search Need Type': 'Pretraga tipa zahtijeva ', 'Search Needs': 'Traži potrebe', 'Search Notes': 'Traži bilješke', 'Search Offices': 'Tražite kancelarije', 'Search Organisations': 'Traži organizacije', 'Search Organizations': 'Pretraži organizacije', 'Search Partners': 'Traži partnere', 'Search Patients': 'Pretraga pacijenata', 'Search Peer': 'Pretraži saradnike', 'Search Peers': 'Traži saradnike', 'Search Personal Effects': 'Pretraži osobne učinke', 'Search Persons': 'Pretraži osobe', 'Search Photos': 'Pretraga fotografija', 'Search Population Statistics': 'Pretraga statistike stanovništva', 'Search Positions': 'Pretraži pozicije', 'Search Problems': 'Pretraži probleme', 'Search Projections': 'Pretraga projekcija', 'Search Projects': 'Pretraži projekte', 'Search Queries': 'Traži upite', 'Search Query': 'Traži upit', 'Search Rapid Assessments': 'Pretraži brze procjene', 'Search Received Items': 'Pretraži primljene stavke', 'Search Received Shipments': 'Pretraži primljene isporuke', 'Search Records': 'Pretraga zapisa', 'Search Recovery Reports': 'Pretraga izvještaja o pronalaženjima', 'Search Registations': 'Pretraga registracija', 'Search Registration Request': 'Potraži zahtjev za registraciju', 'Search Relatives': 'Pretraži srodnike', 'Search Report': 'Izvještaj pretrage', 'Search Reports': 'Pretraga Izvještaja', 'Search Request': 'Potražite zahtjev', 'Search Request Items': 'Pretraži zahtijevane stavke', 'Search Requested Items': 'Pretraži tražene jedinice', 'Search Requested Skills': 'Pretraga traženih sposobnosti', 'Search Requests': 'Zahtjevi za pretragu', 'Search Resources': 'Pretraži resurse', 'Search Responses': 'Pretraga odgovora', 'Search Results': 'Rezultati pretrage', 'Search Rivers': 'Traži rijeke', 'Search Roles': 'Traži uloge', 'Search Rooms': 'Traži sobe', 'Search Scenarios': 'Pretražo scenarije', 'Search School Districts': 'Pretraga rejona škole', 'Search School Reports': 'Pretraga Izvještajao školama', 'Search Sections': 'Traži sekcije', 'Search Sectors': 'Pretraži Sektore', 'Search Sent Email': 'Traži poslanu elektronsku poštu', 'Search Sent Items': 'Pretraži poslane stavke', 'Search Sent Shipments': 'Pretraži poslane pošiljke', 'Search Sent SMS': 'Pretraži poslane SMS', 'Search Service Profiles': 'Pretraživanje profila usluge', 'Search Settings': 'Postavke pretrage', 'Search Shelter Services': 'Traži uslugu skloništa', 'Search Shelter Types': 'Traži tipove skloništa', 'Search Shelters': 'Pretraži skloništa', 'Search Shipment<>Item Relation': 'Traži pošiljku<>Odnos predmeta', 'Search Shipped Items': 'Pretraži isporučene stavke', 'Search Site(s)': 'Traži mjesta', 'Search Skill Equivalences': 'Pretraži ekvivalenciju vještina', 'Search Skill Provisions': 'Pretraga Provizije vjestina', 'Search Skill Types': 'Pretraži tipove sposobnosti', 'Search Skills': 'Pretraži vještine', 'Search SMS InBox': 'Traži SMS dolazne poruke', 'Search SMS OutBox': 'Traži SMS odlazne poruke', 'Search Solutions': 'Traži rješenja', 'Search Sources': 'Traži izvore', 'Search Staff': 'Traži osoblje', 'Search Staff & Volunteers': 'Potraži osoblje ili volontere', 'Search Staff or Volunteer': 'Pretraži osoblje ili volontere', 'Search Staff Types': 
'Traži tip osoblja', 'Search Status': 'Status pretrage', 'Search Storage Bin Type(s)': 'Traži vrste korpe za smještaj', 'Search Storage Bin(s)': 'Traži korpe za smještaj', 'Search Storage Location(s)': 'Traži lokacije o smještaju', 'Search Subscriptions': 'Pretraži Pretplatnike.', 'Search Subsectors': 'Pretražite podsektore', 'Search Support Requests': 'Pretraži zahtjeve za podršku', 'Search Tasks': 'Pretraga zadataka', 'Search Teams': 'Traži timove', 'Search Themes': 'Pretraži teme', 'Search Tickets': 'Pretraži kartice', 'Search Tracks': 'Pretraži tragove', 'Search Training Participants': 'Traži učesnike obuke', 'Search Trainings': 'Traži treninge', 'Search Twitter Tags': 'Pretraži Twitter tagove', 'Search Units': 'Pretraži jedinice', 'Search Users': 'Pretražite korisnike', 'Search Vehicle Details': 'Pretraži detalje vozila', 'Search Vehicles': 'Pretraga vozila', 'Search Volunteer Availability': 'Pretraži mogućnosti volontiranja', 'Search Volunteers': 'Pretraži volontere', 'Search Warehouse Items': 'Pretraži stavke skladišta', 'Search Warehouses': 'Pretraži skladišta', 'Searched?': 'Traženo?', 'Searching for different groups and individuals': 'Traženje različitih grupa i pojedinaca', 'secondary effect': 'sekundarni efekat', 'Secondary Server (Optional)': 'Sekundarni server (opcionalno)', 'seconds': 'sekundi', 'Seconds must be a number between 0 and 60': 'Sekunde moraju biti broj između 0 i 60', 'Seconds must be a number.': 'Sekunde moraju biti broj', 'Seconds must be less than 60.': 'Sekunde bi trebale biti broj manji od 60', 'Section': 'Odjeljak', 'Section deleted': 'Odjel izbrisan', 'Section Details': 'Detalji o odjelima', 'Section updated': 'Odjel ažuriran', 'Sections': 'Sekcije', 'Sections that are part of this template': 'Sekcije koje su dio ovog šablona', 'Sections that can be selected': 'Djelovi koji mogu biti odabrani', 'Sector': 'Sektor', 'Sector added': 'Sektor dodan', 'Sector added to Organization': 'Sektor dodan u organizaciju', 'Sector added to Project': 'Sektor dodan u projekt', 'Sector added to Theme': 'Sekttor dodan u temu', 'Sector deleted': 'Sektor obrisan', 'Sector Details': 'Detalji o sektoru', 'Sector removed from Organization': 'Sektor uklonjen iz organizacije', 'Sector removed from Project': 'Sektor uklonjen sa projekta', 'Sector removed from Theme': 'Sektor uklonjen iz teme', 'Sector updated': 'Sektor ažuriran', 'Sector(s)': 'Sektor(i)', 'Sectors': 'Sektori', 'Sectors to which this Activity Type can apply': 'Sektori na koje je ova vrsta aktivnosti primjenjiva', 'Sectors to which this Theme can apply': 'Sektori na koje je ova vrsta tema primjenjiva', 'Secure Storage Capacity': 'Sigurni kapacitet smještaja', 'Security': 'Sigurnost', 'Security Description': 'Sigurnosni opis', 'Security Policy': 'Politika sigurnosti', 'Security problems': 'Sigurnosni problemi', 'Security Required': 'Potrebna sigurnost', 'Security Status': 'Sigurnosni status', 'See a detailed description of the module on the Sahana Eden wiki': 'Vidi detaljan opis modula na Sahana Eden wiki', 'See all': 'Vidi sve', 'See All Entries': 'Pogledajte sve unose', 'see comment': 'prikaži komentar', 'see more': 'vidi više', 'See the universally unique identifier (UUID) of this repository': 'Postavi univerzalno jedinstveni identifikator (UUID) za ovaj repozitorij', 'See unassigned recovery requests': 'Pregledaj neraspoređene zahtjeve za oporavak', 'Seen': 'Viđeno', 'Select': 'Izaberi', 'Select %(location)s': '%(location)s za izbor', "Select 2 records from this list, then click 'Merge'.": "Odaberite 2 sloga iz 
liste i kliknite 'Spoji'", 'Select a label question and at least one numeric question to display the chart.': 'Odaberite pitanje oznake i barem jedno brojčano pitanje za prikaz dijagrama.', 'Select a location': 'Izaberite mjesto', "Select a manager for status 'assigned'": "Odaberi menadžera za status 'dodijeljeno'", "Select a person in charge for status 'assigned'": "Odaberi osobu zaduženu za status 'dodijeljeno'", 'Select a question from the list': 'Označite pitanje sa liste', 'Select a range for the number of total beds': 'Označite opseg za ukupan broj kreveta', "Select a Room from the list or click 'Add Room'": 'Izaberite sobu sa spiska ili pritisnite "Dodaj sobu"', "Select a Room from the list or click 'Create Room'": 'Izaberite sobu sa spiska ili kliknite "Kreiraj sobu"', 'Select all': 'Izaberi sve', 'Select All': 'Izaberi sve', 'Select all templates (All modules included)': 'Odaberi sve predloške (Svi moduli uključeni)', 'Select all that apply': 'Označi sve što se odnosi na to', 'Select an existing bin': 'Odaberi postojeću korpu', 'Select an image to upload. You can crop this later by opening this record.': 'Odaberite sliku za postavljanje. Možete je izrezati kasnije otvaranjem ovog zapisa.', 'Select an Organisation to see a list of offices': 'Izaberi organizaciju za prikaz liste kancelarija', 'Select an Organization to see a list of offices': 'Izaberi organizaciju za prikaz liste kancelarija', 'Select Existing Location': 'Izaberi postojeću lokaciju', 'Select from registry': 'Izaberi iz registra', 'Select Items from the Request': 'Izaberite željene stavke', 'Select Items from this Inventory': 'Odaberite stavke iz ovog inventara', 'Select Label Question': 'Izaberi pitanje oznake', 'Select language code': 'Izaberi oznaku jezika', 'Select Modules for translation': 'Odaberite module za prevođenje', 'Select Modules which are to be translated': 'Odaberite module koji se trebaju prevesti', 'Select Numeric Questions (one or more):': 'Odaberite numerička pitanja (jedno ili više):', 'Select one or more option(s) that apply': 'Odaberite jednu ili više primjenjivih opcija', 'Select Photos': 'Izaberi fotografije', 'Select resources to import': 'Izaberite resurse za uvoz', 'Select Skills from the Request': 'Izaberi vještine iz Zahtjeva', 'Select Stock from this Warehouse': 'Odaberite zalihu iz ovog skladišta', 'Select the default site.': 'Izaberi podrazumijevano mjesto', 'Select the language file': 'Izaberi jezičku datoteku', 'Select the option that applies': 'Odaberite primjenjivu opciju', 'Select the overlays for Assessments and Activities relating to each Need to identify the gap.': 'Izaberite preklapanje za procjene i aktivnosti koje se odnose na svaku potrebu da se identifikuje propust.', 'Select the person assigned to this role for this project.': 'Odaberi osobu dodijeljenu za ovu ulogu za ovaj projekt', 'Select the person associated with this scenario.': 'Odaberi osobu dodijeljenu za ovaj scenario', 'Select the required modules': 'Izaberite potrebne module', "Select this if all specific locations need a parent at the deepest level of the location hierarchy. For example, if 'district' is the smallest division in the hierarchy, then all specific locations would be required to have a district as a parent.": 'Odaberite ovo ako sve određene lokacije trebaju roditelja na najdubljem nivou lokacijske hijerarhije. 
Na primjer, ako je "distrikt" najmanja podjela u hijerarhiji,to znači da sve određene lokacije moraju imati distrikt kao roditelja.', "Select this if all specific locations need a parent location in the location hierarchy. This can assist in setting up a 'region' representing an affected area.": "Odaberite ovo ako sve specifične lokacije trebaju imati nadređenu lokaciju usljed hijerarhije. Ovo može pomoći u postavljanju 'regije' koja predstavlja pogođenu oblast", 'Select this if you need this resource to be mapped from site_id instead of location_id.': 'Odaberite ovo ako želite da se resurs mapira prema oznaci mjesta site_id umjesto lokacije location_id.', 'Select This Location': 'Odaberi ovu lokaciju', 'Select to show this configuration in the menu.': 'Odaberi prikaz ove konfiguracije u meniju', 'Select to show this configuration in the Regions menu.': 'Izaberite da vam se prikaže ova konfiguracija u meniju Regije', 'selected': 'odabran', 'Selected Jobs': 'Izabrani poslovi', 'Selected OCR Form has no pages. Use another revision of create a new revision by downloading a new Form.': 'Odabrani OCR nema stranice. Koristite drugu reviziju da kreirate novu reviziju preuzimanjem novog formulara.', 'Selected Questions for all Completed Assessment Forms': 'Odabrana pitanja za sve ispunjene formulare procjene', 'Selects what type of gateway to use for outbound SMS': 'Izabira tip mrežnog izlaza za izlazni SMS', 'Selects whether to use a Modem, Tropo or other Gateway for sending out SMS': 'Bira da li koristiti modem, tropo ili neki drugi način za slanje SMS', 'Selects whether to use the gateway or the Modem for sending out SMS': 'Bira da li koristiti mrežni izlaz ili modem za slanje SMS', 'Self Registration': 'Samoregistracija', 'Self-care': 'Vlasita briga', 'Self-registration': 'Samoregistracija', 'Send': 'Pošalji', 'Send & Receive Email messages (e.g. for alerting)': 'Pošalji i primi Email poruke (npr. za alarmiranje)', 'Send & Receive SMS messages (e.g. for alerting)': 'Pošalji i primi SMS poruke (npr. 
za alarmiranje)', 'Send a message to this person': 'Pošalji poruku ovoj osobi', 'Send a message to this team': 'Pošalji poruku ovom timu', 'Send Alerts using Email &/or SMS': 'Poslati upozorenje koristeći Email i/ili SMS', 'Send batch': 'Paketno slanje', 'Send Commitment as Shipment': 'Pošalji zaduženje kao pošiljku', 'Send Dispatch Update': 'Pošalji ažuriranje raspodjele', 'Send Email': 'Pošalji e-mail', 'Send from %s': 'Pošalji sa %s ', 'Send Message': 'Šalji poruku', 'Send message': 'Pošalji poruku', 'Send new message': 'Pošalji novu poruku', 'Send New Shipment': 'Pošalji novu pošiljku', 'Send Notification': 'Pošalji obavijest', 'Send Shipment': 'Slanje pošiljke', 'Send SMS': 'Pošalji SMS', 'Send Task Notification': 'Obavijest o slanju zadatka', 'Sender': 'Pošiljalac', 'Sender deleted': 'Pošiljalac obrisan', 'Sender Priority updated': 'Prioritet pošiljaoca ažuriran', 'Sender Whitelisted': 'Pošiljaoc na bijeloj listi', 'Sends & Receives Alerts via Email & SMS': 'Šalje i prima upozorenja putem emaila i SMS-a', 'Senior (50+)': 'Stariji (50+)', 'Sensitivity': 'Osjetljivost', 'Sent': 'Poslano', 'Sent By': 'Poslano od', 'Sent By Person': 'Poslano putem osobe', 'Sent date': 'Poslano dana', 'Sent Emails': 'Poslana elektronska pošta', 'Sent Item deleted': 'Poslana stavka obrisan', 'Sent Item Details': 'Detalji poslanog predmeta', 'Sent Item updated': 'Poslana stavka ažurirana', 'Sent Shipment canceled': 'Poslana Pošiljka otkazana', 'Sent Shipment canceled and items returned to Inventory': 'Poslana pošiljka otkazana i predmeti vraćeni u inventar', 'Sent Shipment canceled and items returned to Warehouse': 'Poslana pošiljka otkazana i predmeti vraćeni u skladište', 'Sent Shipment Details': 'Detalji poslate pošiljke', 'Sent Shipment has returned, indicate how many items will be returned to Warehouse.': 'Poslana pošiljka je vraćena, navedite koliko će se stavki vratiti u skladište', 'Sent Shipment updated': 'Poslana pošiljka ažurirana', 'Sent Shipments': 'Poslate pošiljke', 'Sent SMS': 'Pošalji SMS', 'Sent Tweets': 'Pošalji Tweets', 'Separate latrines for women and men': 'Odvojeni zahodi za muškarce i žene', 'separated': 'razdvojeni', 'Separated children, caregiving arrangements': 'Djeca odvojena od roditelja, raspored skrbnika', 'separated from family': 'Odvojen/a od porodice', 'Seraiki': 'Seraiki', 'Serbia': 'Srbija', 'Serial Number': 'Serijski broj', 'Series': 'Serije', 'Series added': 'Serija dodana', 'Series deleted': 'Serija obrisana', 'Series Details': 'Detalji serije', 'Series details missing': 'Nedostaju detalji serije', 'Series updated': 'Serija ažurirana', 'Server': 'Server', 'Service': 'Usluga', 'Service added': 'Usluga dodana', 'Service added to Organization': 'Usluga dodana organizaciji', 'Service Catalogue': 'Katalog usluga', 'Service deleted': 'Usluga obrisana', 'Service Details': 'Detalji usluge', 'Service Due': 'Rok usluge', 'Service or Facility': 'Usluga ili objekat', 'Service profile added': 'Dodat profil usluge', 'Service profile deleted': 'Obrisan profil usluge', 'Service profile updated': 'Ažuriran profil usluge', 'Service Record': 'Zapis usluge', 'Service removed from Organization': 'Usluga uklonjena iz organizacije', 'Service updated': 'Usluga ažurirana', 'Services': 'Usluge', 'Services Available': 'Dostupne usluge', 'Set as default Site': 'Postavi kao podrazumijevano mjesto', 'Set as my Default': 'Postavi kao moje podrazumijevano', 'Set Base Facility/Site': 'Postavljeno mjesto/objekt baze', 'Set Base Site': 'Postavi osnovnu lokaciju', 'Set By': 'Postavi prema', 'Set True to 
allow editing this level of the location hierarchy by users who are not MapAdmins.': 'Postaviti na True da se dozvoli uređivanje ovog nivoa hierarhije lokacija korisnicima koji nisu MapAdmin.', 'Setting added': 'Podešavanje dodano', 'Setting deleted': 'Podešavanje obrisano', 'Setting Details': 'Detalji postavke', 'Setting updated': 'Postavka ažurirana', 'Settings': 'Postavke', 'Settings updated': 'Podešavanja ažurirana', 'Settings were reset because authenticating with Twitter failed': 'Postavke su obrisane zbog neuspjele autentifikacije sa Twitterom', 'Settings which can be configured through the web interface are available here.': 'Postavke, koje je moguće konfigurisati putem web sučelja, su dostupne ovdje.', 'Severe': 'Strogo', 'Severity': 'Ozbiljnost', 'Severity:': 'Ozbiljnost:', 'Sex': 'Spol', 'Seychelles': 'Sejšeli', 'shallow': 'plitak', 'Shapefile Layer': 'Sloj datoteke s likovima', 'Share': 'Dijeli', 'Share a common Marker (unless over-ridden at the Feature level)': 'Podijeli zajednički marker (osim ukoliko nije zamijenjeno na nivou karakteristika)', 'shaved': 'obrijano', 'Shelter': 'Sklonište', 'Shelter & Essential NFIs': 'Sklonište & osnovni neprehrambeni artikli', 'Shelter added': 'Sklonište dodato', 'Shelter deleted': 'Sklonište obrisano', 'Shelter Details': 'Detalji o skloništu', 'Shelter Manager': 'Menadžer skloništa', 'Shelter Name': 'Naziv sklonista', 'Shelter Registry': 'Registar skloništa', 'Shelter Service': 'Usluge u skloništu', 'Shelter Service added': 'Usluga skloništa dodana', 'Shelter Service deleted': 'Usluga skloništa obrisana', 'Shelter Service Details': 'Detalji usluga skloništa', 'Shelter Service updated': 'Usluga skloništa ažurirana', 'Shelter Services': 'Usluga skloništa', 'Shelter Settings': 'Postavke skloništa', 'Shelter Status': 'Status skloništa', 'Shelter Status added': 'Status skloništa dodan', 'Shelter Status deleted': 'Status skloništa obrisan', 'Shelter Status Details': 'Detalji statusa skloništa', 'Shelter Status updated': 'Status skloništa ažuriran', 'Shelter Statuses': 'Statusi skloništa', 'Shelter Type': 'Tip skloništa', 'Shelter Type added': 'Tip skloništa dodan', 'Shelter Type deleted': 'Tip skloništa obrisan', 'Shelter Type Details': 'Detalji o tipu skloništa', 'Shelter Type updated': 'Tip skloništa ažuriran', 'Shelter Types': 'Tipovi skloništa', 'Shelter Types and Services': 'Vrste i usluge skloništa', 'Shelter updated': 'Sklonište ažurirano', 'Shelter/NFI Assistance': 'Sklonište/Pomoć u neprehrambenim artiklima', 'Shelter/NFI assistance received/expected': 'Sklonište/Pomoć u neprehrambenim artiklima/očekivano', 'Shelters': 'Skloništa', 'shift_start': 'pomjeranje_početka', 'Shipment': 'Pošiljka', 'Shipment Created': 'Pošiljka napravljena', 'Shipment Details': 'Detalji pošiljke', 'Shipment Item deleted': 'Predmet dostave obrisan', 'Shipment Item Details': 'Detalji stavki pošiljke', 'Shipment Item updated': 'Stavka dostave ažurirana', 'Shipment Items': 'Stavke pošiljke', 'Shipment Items Received': 'Primljene stavke pošiljke', 'Shipment Items received by Inventory': 'Pošiljke primljene u skladište', 'Shipment Items sent from Inventory': 'Isporuke stavki poslatih iz inventara', 'Shipment Items sent from Warehouse': 'Isporuke stvaki poslatih iz skladišta', 'Shipment received': 'Primljena pošiljka', 'Shipment to Receive': 'Pošiljka za prijem', 'Shipment to Send': 'Pošiljka za poslati', 'Shipment Type': 'Tip pošiljke', 'Shipment/Way Bills deleted': 'Dostavnica/putni nalog obrisana', 'Shipments': 'Pošiljke', 'Shipments To': 'Pošiljka do', 'Shipping 
Organization': 'Organizacija dostave', 'Shooting': 'Pucnjava', 'short': 'kratki', 'Short Assessment': 'Kratka procjena', 'Short Description': 'Kratak Opis', 'Short Description:': 'Kratak opis:', 'Short Text': 'Kratki tekst', 'Short Title / ID': 'Kratki naslov / ID', 'short<6cm': 'kratak<6cm', 'Show': 'Prikaži', 'Show %(number)s entries': 'Prikaži %(number)s članova', 'Show author picture?': 'Prikaži sliku autora', 'Show Checklist': 'Prikaži kontrolnu listu', 'Show Details': 'Prikaži detalje', 'Show in Menu?': 'Prikazati u Meniju ?', 'Show Location?': 'Prikaži lokaciju?', 'Show Map': 'Prikaži kartu', 'Show on map': 'Prikaži na mapi', 'Show on Map': 'Prikaži na karti', 'Show Region in Menu?': 'Pokaži regiju u meniju?', 'Show Table': 'Prikaži tabelu', 'Show totals': 'Prikaži sumarne kolone', 'Showing 0 to 0 of 0 entries': 'Prikaz 0 do 0 od 0 unosa', 'Showing _START_ to _END_ of _TOTAL_ entries': 'Prikazujem _START_ do _END_ od _TOTAL_ zapisa', 'Showing latest entries first': 'Zadnji unosi se prvi prikazuju', 'sides': 'strane', 'Sierra Leone': 'Sierra Leone', 'Sign-up as a volunteer': 'Prijavi se kao volonter', 'Sign-up for Account': 'Prijavi se za Račun', 'sign-up now': 'Prijavi se sada', 'Sign-up succesful - you should hear from us soon!': 'Prijava uspješna, uskoro ćemo Vam se javiti!', 'Signature': 'Potpis', 'Signature / Stamp': 'Potpis / Pečat', 'simple': 'jednostavan', 'Sindhi': 'Sindi', 'Singapore': 'Singapur', 'single': 'samac', 'Single PDF File': 'Jedan PDF dokument', 'Site': 'Lokacija', 'Site Administration': 'Administracija lokacije', 'Site Contact': 'Kontakt mjesta', 'Site ID': 'ID mjesta', 'Site Key': 'Ključ mjesta', 'Site Key which this site uses to authenticate at the remote site (if required for this type of repository).': 'Ključ sajta kojim se on prijavljuje na udaljeni sajt (ako je potrebno za ovu vrstu repozitorija).', 'Site Location Description': 'Opis lokacije mjesta', 'Site Location Name': 'Naziv lokacije mjesta', 'Site Manager': 'Menadžer mjesta', 'Site Name': 'Ime mjesta', 'Site Needs': 'Potrebe mjesta', 'Site Needs added': 'Potrebe mjesta dodane', 'Site Needs deleted': 'Potrebe mjesta obrisane', 'Site Needs updated': 'Potrebe mjesta ažurirane', 'Site/Warehouse': 'Mjesto/Skladište', 'Sites': 'Mjesta', 'SITUATION': 'SITUACIJA', 'Situation': 'Situacija', 'Situation Awareness': 'Svjesnost Situacije', 'Situation Awareness & Geospatial Analysis': 'Svjesnost Situacije & Geoprostorna Analiza', 'Situation Map': 'Mapa situacije', 'Size of cache:': 'Veličina keša:', 'Skeleton Example': 'Primjer skeleta', 'Sketch': 'Nacrt', 'Skill': 'Vještina', 'Skill added': 'Vještina dodana', 'Skill added to Request': 'Vještina dodana zahtjevu', 'Skill Catalog': 'Katalog vještina', 'Skill deleted': 'Vještina obrisana', 'Skill Details': 'Detalji VJEŠTINE', 'Skill Equivalence': 'Ekvivalencija vještine', 'Skill Equivalence added': 'Dodata ekvivalencija vještine', 'Skill Equivalence deleted': 'Obrisana ekvivalencija vještine', 'Skill Equivalence Details': 'Detalji ekvivalentnosti vještina', 'Skill Equivalence updated': 'Ažurirana ekvivalencija vještine', 'Skill Equivalences': 'Ekvivalencije vještina', 'Skill Provision': 'Pružanje vještina', 'Skill Provision added': 'Pružanje vještine dodato', 'Skill Provision Catalog': 'Katalog pribavljanja vještina', 'Skill Provision deleted': 'Pružanje vještina obrisano', 'Skill Provision Details': 'Detalji o pružanju vještina', 'Skill Provision updated': 'Provizija vjestina ažurirana', 'Skill Provisions': 'Odredba Vještina', 'Skill removed': 'Uklonjena vještina', 
'Skill removed from Request': 'Uklonjena vještina iz Zahtjeva', 'Skill Status': 'Status vještina', 'Skill TYpe': 'Vrsta vještine', 'Skill Type': 'Tip sposobnosti', 'Skill Type added': 'Dodan tip vještine', 'Skill Type Catalog': 'Katalog vrsta vještina', 'Skill Type deleted': 'Obrisan tip vještine', 'Skill Type Details': 'Detalji tipa sposobnosti', 'Skill Type updated': 'Ažuriran tip vještine', 'Skill Types': 'Tipovi vještina', 'Skill updated': 'Vještina ažurirana', 'Skills': 'Vještine', 'Skills Catalog': 'Katalog vještina', 'Skills Management': 'Upravljanje vještinama', 'Skin Marks': 'Oznake na koži', 'slight': 'pomalo', 'slim': 'vitak', 'Slope failure, debris': 'Propast padine , krhorine', 'Slovakia': 'Slovačka', 'Slovenia': 'Slovenija', 'small': 'mali', 'Small scale mitigation': 'Smanjenje u malom stepenu', 'Small Trade': 'Mala trgovina', 'Smoke': 'Dim', 'Smoking habits': 'Pušačke navike', 'SMS added': 'SMS dodan', 'SMS deleted': 'SMS obrisan', 'SMS Details': 'Detalji o SMS', 'SMS Gateway Settings': 'Postavke SMS izlaza', 'SMS InBox': 'SMS dolazne poruke', 'SMS Modems (Inbound & Outbound)': 'SMS Modemi (ulazni i izlazni)', 'SMS Outbound': 'SMS van granica', 'SMS Outbound Gateway': 'SMS izlaz', 'SMS Outbound Gateway updated': 'SMS mrežni izlaz ažuriran', 'SMS OutBox': 'SMS odlazne poruke', 'SMS Settings': 'SMS Postavke', 'SMS settings updated': 'Postavke SMS-a su ažurirane', 'SMS updated': 'SMS ažuriran', 'SMS via SMTP (Outbound)': 'SMS preko SMTP (izlazni)', 'SMS WebAPI (Outbound)': 'SMS WebAPI (izlazni)', 'SMTP to SMS settings updated': 'SMTP u SMS postavke ažurirane', 'Snapshot': 'Snimak stanja', 'Snapshot Report': 'Kratko izvješće', 'Snow Fall': 'Sniježne padavine', 'Snow Squall': 'Snježna oluja', 'Social': 'Društveno', 'Soil bulging, liquefaction': 'Ispupčenje tla, rastapanje', 'Soliciting Cash Donations?': 'Iznuđene donacije u gotovini?', 'Solicitudes': 'Zabrinutost', 'Solid waste': 'Kruti otpad', 'Solution': 'Rješenje', 'Solution added': 'Rješenje dodano', 'Solution deleted': 'Rješenje izbrisano', 'Solution Details': 'Detalji rješenja', 'Solution Item': 'Stavka rješenja', 'Solution updated': 'Rješenja izmjenjena', 'Solutions': 'Rješenja', 'Somalia': 'Somalija', 'Some': 'Neki', 'Sorry - the server has a problem, please try again later.': 'Izvinjavamo se - problem sa serverom, molimo pokušajte kasnije.', 'Sorry location %(location)s appears to be outside the area of parent %(parent)s.': 'Žao nam je, ali izgleda da se lokacija %(location)s nalazi izvan oblasti roditelja %(parent)s..', 'Sorry location %(location)s appears to be outside the area supported by this deployment.': 'Izvinite ali lokacija %(location)s je izvan područja koje je podržano ovom instalacijom.', 'Sorry location appears to be outside the area of parent %(parent)s.': 'Žao nam je, ali izgleda da se lokacija nalazi izvan oblasti roditelja %(parent)s..', 'Sorry location appears to be outside the area supported by this deployment.': 'Izvinite ali ta lokacija je izvan područja koje je podržano ovom instalacijom', 'Sorry that location appears to be outside the area of the Parent.': 'Žao nam je, ali izgleda da se lokacija nalazi izvan oblasti roditelja.', 'Sorry that location appears to be outside the area supported by this deployment.': 'Nažalost ali ta lokacija je izvan područja koje je podržano ovim sistemom.', 'Sorry, I could not understand your request': 'Oprostite, ne mogu razumjeti vaš zahtjev.', 'Sorry, only users with the MapAdmin role are allowed to create location groups.': 'Nažalost samo korisnicima sa MapAdmin 
ulogom je dozvoljeno kreiranje grupa lokacija.', 'Sorry, only users with the MapAdmin role are allowed to edit these locations': 'Žao nam je, samo korisnici sa ulogom administratora mape imaju dozvolu da uređuju ove lokacije', 'Sorry, something went wrong.': 'Žao nam je, dogodila se greška.', 'Sorry, that page is forbidden for some reason.': 'Nažalost, ta stranica je zabranjena iz nekog razloga.', 'Sorry, that service is temporary unavailable.': 'Žao nam je, ova usluga je trenutno nedostupna.', 'Sorry, there are no addresses to display': 'Izvinite, ne postoje adrese za prikaz', "Sorry, things didn't get done on time.": 'Žao nam je, stvari nisu završene na vrijeme', "Sorry, we couldn't find that page.": 'Izvinite, ta stranica nije pronađena.', 'Source': 'Izvor', 'source': 'target', 'Source deleted': 'Izvor izbrisan', 'Source ID': 'Identifikacija izvora', 'Source Link': 'Izvorna veza', 'Source Name': 'Ime izvora ', 'Source of Information': 'Izvor informacije', 'Source Time': 'Izvorno vrijeme', 'Source updated': 'Izvor ažuriran.', 'Source URL': 'URL izvora', 'Sources': 'Izvori', 'Sources of income': 'Izvori prihoda', 'South Africa': 'Južna Afrika', 'South Ossetia': 'Južna Osetija', 'Space Debris': 'Svemirski otpad', 'Spain': 'Španija', 'Spanish': 'Španski', 'Special Ice': 'Specijalni led', 'Special Marine': 'Posebna mornarica', 'Special Multirisk Protection Vehicle': 'Specijalna varijabla za zaštitu od višestrukog rizika', 'Special needs': 'Specijalne potrebe', 'Specialized Hospital': 'Specijalizovana Bolnica', 'Specific Area (e.g. Building/Room) within the Location that this Person/Group is seen.': 'Tačno mjesto (npr. zgrada / soba) u okviru lokacije na kojem je ta osoba/grupa viđena.', 'Specific locations need to have a parent of level': 'Specifične lokacije moraju imati roditelja nivoa', 'Specific Operations Vehicle': 'Vozilo za specifične poslove', 'specify': 'precizirati', 'Specify a descriptive title for the image.': 'Specificiraj opisni naslov za sliku', 'Specify the bed type of this unit.': 'Specifikuj tip kreveta za ovu jedinicu', 'Specify the minimum sustainability in weeks or days.': 'Navedi minimalnu održivosr u sedmicama ili danima.', 'Specify the number of available sets': 'Specificiraj broj raspoloživih setova', 'Specify the number of available units (adult doses)': 'Specificiraj broj dostupnih jedinica (odrasle doze)', 'Specify the number of available units (litres) of Ringer-Lactate or equivalent solutions': 'Specificirajte broj dostupnih jedinica (litara) Ringer-Laktata ili ekvivalentnih rastvora', 'Specify the number of sets needed per 24h': 'Specificirati broj skupova potrebnih za 24h', 'Specify the number of units (adult doses) needed per 24h': 'Specificiraj broj jedinica (doza za odrasle) potrebnih u 24 sata', 'Specify the number of units (litres) of Ringer-Lactate or equivalent solutions needed per 24h': 'Naznačite broj jedinica (litara) Ringer-laktata ili ekvivalentnih rastvora potrebnih za 24h', 'Speed': 'Brzina', 'Spherical Mercator (900913) is needed to use OpenStreetMap/Google/Bing base layers.': 'Sferni Mercator (900913) je potreban za upotrebu OpenStreetMap/Google/Bing baznih slojeva.', 'Spherical Mercator?': 'Sferna Merkatorova?', 'Spreadsheet': 'Tabela (spreadsheet)', 'Spreadsheet Importer': 'Uvoznik tabelarnog prikaza', 'Spreadsheet uploaded': 'Učitan tabelarni prikaz', 'Spring': 'Proljeće', 'Squall': 'Udar vjetra', 'squint-eyed': 'razrok', 'Sri Lanka': 'Šri Lanka', 'Staff': 'Osoblje', 'staff': 'osoblje', 'Staff & Volunteers': 'Osoblje i volonteri', 'Staff & 
Volunteers (Combined)': 'Osoblje i volonteri (kombinovano)', 'Staff 2': 'Osoblje 2', 'Staff added': 'Član osoblja dodan', 'Staff and Volunteers': 'Osoblje i volonteri', 'Staff Assigned': 'Dodijeljeno osoblje', 'Staff Assignment Details': 'Detalji o dodjeli osoblja', 'Staff Assignment removed': 'Obrisana dodjela osoblja', 'Staff Assignment updated': 'Dodjela osoblja ažurirana', 'Staff Assignments': 'Dodjele osoblja', 'Staff deleted': 'Osoblje obrisano', 'Staff ID': 'ID osoblja', 'Staff Management': 'Upravljanje osobljem', 'Staff member added': 'Član osoblja dodan', 'Staff Member added': 'Član osoblja dodan', 'Staff Member deleted': 'Član osoblja obrisan', 'Staff Member Details': 'Detalji o članovima osoblja', 'Staff Member Details updated': 'Detalji Član osoblja ažuriran', 'Staff Members': 'Članovi osoblja', 'staff members': 'članovi osoblja', 'Staff present and caring for residents': 'Osoblje je prisutno i brine za stanovnike', 'Staff Record': 'Zapis o osoblju', 'Staff Report': 'Izvještaj o osoblju', 'Staff Type added': 'Tip osoblja dodan', 'Staff Type deleted': 'Tip osoblja obrisan', 'Staff Type Details': 'Detaljii o osoblju', 'Staff Type updated': 'Tip osoblja ažuriran', 'Staff Types': 'Vrste osoblja', 'Staff updated': 'Osoblje ažurirano', 'Staff with Contracts Expiring in the next Month': 'Osoblje čiji ugovor ističe sljedećeg mjeseca', 'Staff/Volunteer': 'Osoblje/volonteri', 'Staff/Volunteer Record': 'Zapis o osoblju/volonterima', 'Staff2': 'Osoblje2', 'Staffing': 'Zapošljavanje', 'Staffing Level': 'Nivo osoblja', 'Stairs': 'Stepenice', 'Start Date': 'Datum početka', 'Start date': 'Datum početka', 'Start of Period': 'početak perioda', 'state': 'stanje', 'State': 'Država', 'State / Province': 'Entitet / Savezna država', 'state location': 'položaj države', 'Stationery': 'Školski pribor', 'Statistics': 'Statistika', 'Status': 'Status', "Status 'assigned' requires the %(fieldname)s to not be blank": "Status 'dodijeljen' zahtijeva da %(fieldname)s nije prazno", 'Status added': 'Dodat status', 'Status deleted': 'Obrisan status', 'Status Details': 'Detalji statusa', 'Status of clinical operation of the facility.': 'Status kliničkog rada objekta.', 'Status of general operation of the facility.': 'Status generalnih operacija objekata', 'Status of morgue capacity.': 'Status kapaciteta mrtvačnice', 'Status of operations of the emergency department of this hospital.': 'Operativni status hitnog odjela ove bolnice', 'Status of operations/availability of emergency medical services at this facility.': 'Status operacija/dostupnosti hitnih medicinskih usluga na ovom objektu.', 'Status of security procedures/access restrictions for the facility.': 'Status sigurnosnih procedura/ograničenja pristupa za ovaj objekat.', 'Status of security procedures/access restrictions in the hospital.': 'Status sigurnosnih procedura/zabrane pristupa u bolnicama.', 'Status of the clinical departments.': 'Status kliničkih odjela', 'Status of the facility.': 'Stanje objekta', 'Status of the operating rooms of this facility.': 'Status radnih prostorija na ovom objektu', 'Status of the operating rooms of this hospital.': 'Status operacionih sala u ovoj bolnici.', 'Status Report': 'Izvještaj o stanju', 'Status Report added': 'Statusni izvještaj dodan', 'Status Report deleted': 'Statusni izvještaj obrisan', 'Status Report updated': 'Statusni izvještaj ažuriran', 'Status Updated': 'Status ažuriran', 'Status updated': 'Ažuriran status', 'Statuses': 'Statusi', 'Steel frame': 'Čelični okvir', 'Stock': 'Zaliha', 'Stock added to 
Warehouse': 'Zaliha dodana u skladište', 'Stock Adjustment': 'Prilagođenje zalihe', 'Stock Adjustment Details': 'Prilagođenje zalihe', 'Stock Adjustments': 'Detalji prilagođenja zaliha', 'Stock Count created': 'Broj zaliha kreiran', 'Stock Count deleted': 'Količina zalihe obrisana', 'Stock Count Details': 'Detalji o broju zaliha', 'Stock Count modified': 'Broj zaliha izmijenjen', 'Stock Counts': 'Količine zaliha', 'Stock Expires %(date)s': 'Zaliha ističe %(date)s', 'Stock in Warehouse': 'Zaliha u skladištu', 'Stock removed from Warehouse': 'Zaliha uklonjena iz skladišta', 'Stolen': 'Ukradeno', 'Storage Bin': 'Korpa za čuvanje', 'Storage Bin added': 'Dodana korpa za smještaj', 'Storage Bin deleted': 'Obrisana smještajna korpa', 'Storage Bin Details': 'Detalji korpe za smještaj', 'Storage Bin Number': 'Broj smještajne korpe', 'Storage Bin Type added': 'Dodana vrsta korpe za smještaj', 'Storage Bin Type deleted': 'Tip korpe za smještaj obrisan', 'Storage Bin Type Details': 'Detalji korpe za smještaj', 'Storage Bin Type updated': 'Korpa za smještaj ažurirana', 'Storage Bin Types': 'Vrste smještajnih korpi', 'Storage Bins': 'Korpe za smještaj', 'Storage Capacity (m3)': 'Kapacitet smještaja(m3)', 'Storage Location': 'Lokacije skladišta', 'Storage Location deleted': 'Lokacija skladišta obrisana', 'Storage Location ID': 'ID lokacije skladišta', 'Storage Location Name': 'Naziv lokacije skladišta', 'Storage Locations': 'Lokacija skladišta', 'Storage Type': 'Tip smještaja', 'Store spreadsheets in the Eden database': 'Sačuvaj proračunske tablice u Eden bazu podataka', 'Storeys at and above ground level': 'Spratovi na i iznad razine tla', 'Storm Force Wind': 'Olujni Vjetar', 'Storm Surge': 'Olujni val', 'Stowaway': 'Slijepi putnik ', 'straight': 'pravo', 'Strategy': 'Strategija', 'Street (add.)': 'Uca (adresa)', 'Street (continued)': 'Ulica (nastavljena)', 'Street Address': 'Adresa (ulica)', 'Street View': 'Prikaz ulica', 'Streetview Enabled?': 'Streetview omogućen?', 'String used to configure Proj4js. Can be found from %(url)s': 'String korišten za konfiguraciju Proj4js. 
Može se naći na %(url)s', 'Strong': 'Jako', 'Strong Wind': 'Jak vjetar', 'Structural': 'Strukturalno', 'Structural Hazards': 'Strukturne opasnosti', 'Style': 'Stil', 'Style Field': 'Polje stila', 'Style invalid': 'Neispravan stil', 'Style Values': 'vrijednosti stila', 'Sub Category': 'Potkategorija', 'Sub-type': 'Podtip', 'Subject': 'Tema', 'Submission Succesful': 'Predaja uspješna', 'Submission successful - please wait': 'Slanje uspješno - molimo pričekajte', 'Submission successful - please wait...': 'Podnesak uspješan - molimo pričekajte', 'Submit': 'Unesi', 'submit': 'unesi', 'Submit a request for recovery': 'Podnijeti zahtjev za oporavak', 'Submit New': 'Predaj novi', 'Submit New (full form)': 'Podnesite novi (potpuna forma)', 'Submit New (triage)': 'Navedi novi (trijaža)', 'Submit new Level 1 assessment (full form)': 'Potvrdi novo procjenjivanje prvog nivoa (potpuna forma)', 'Submit new Level 1 assessment (triage)': 'Proslijedi novu procjenu nivoa 1 (trijaža)', 'Submit new Level 2 assessment': 'Podnesi novu procjenu Nivoa 2', 'Submitting information about the individual such as identification numbers, physical appearance, last seen location, status, etc': 'Slanje informacija o pojedincima, poput identifikacionih brojeva, fizičkog izgleda, mjesta gdje je zadnji put viđen, status itd', 'Subscribe': 'Pretplati se', 'Subscription added': 'Dodana pretplata', 'Subscription deleted': 'Pretplata obrisana', 'Subscription Details': 'Detalji pretplate', 'Subscription updated': 'Pretplata izmjenjena', 'Subscriptions': 'Pretplate', 'Subscriptions Status': 'Status pretplate', 'Subsector': 'podsektor', 'Subsector added': 'Podsektor dodat', 'Subsector deleted': 'Podsektor izbrisan', 'Subsector Details': 'Detalji o podsektoru', 'Subsector updated': 'Ažuriran podsektor', 'Subsectors': 'Podsektori', 'Subsistence Cost': 'Trošak opstanka', 'SubType of': 'Podtip od', 'Suburb': 'Predgrađe', 'Successfully registered at the repository.': 'Uspješno registrovano na repozitoriju.', 'suffered financial losses': 'uočeni finansijski gubici', 'Sufficient care/assistance for chronically ill': 'Dovoljna briga/pomoć za hronično bolesne', 'Suggest not changing this field unless you know what you are doing.': 'Predlažemo da ne vršite nikakve izmjene ovog polja, osim ako ne znate šta radite.', 'Summary': 'Sažetak', 'Summary by Administration Level': 'Sažetak na administrativnom nivou', 'Summary by Question Type - (The fewer text questions the better the analysis can be)': 'Sumarno po vrsti pitanja - (Što je manje tekstualnih pitanja, bolja je analiza)', 'Summary Details': 'Detalji sažetka', 'Summary of Completed Assessment Forms': 'Sažetak završenih formulara procjene', 'Summary of Incoming Supplies': 'Sumarno ulaz zaliha', 'Summary of Releases': 'Sumarno izlaz', 'Sunday': 'Nedjelja', 'Supervisor': 'Nadglednik', 'Supplier': 'Dobavljač', 'Supplier added': 'Dobavljač dodan', 'Supplier deleted': 'Dobavljač obrisan', 'Supplier Details': 'Detalji dobavljača', 'Supplier updated': 'Dobavljač ažuriran', 'Supplier/Donor': 'Dobavljač/donator', 'Suppliers': 'Dobavljači', 'Supply Chain Management': 'Upravljanje lancom zaliha', 'Supply Item Categories': 'Kategorije predmeta za snabdijevanje', 'Support Request': 'Zahtjev za podršku', 'Support Requests': 'Zahtjevi za podršku', 'supports nurses in the field to assess the situation, report on their activities and keep oversight.': 'podrška medicinskim sestrama vezano za procjenu situacije, praćenje aktivnosti i nadzor', 'Supports the decision making of large groups of Crisis Management Experts 
by helping the groups create ranked list.': 'Podržava odlučivanje velikih grupa eksperata kriznog menadžmenta pomažući grupama da kreiraju rangirane liste', 'Sure you want to delete this object?': 'Da li ste sigurni da želite da obrišete ovaj objekat?', 'Surgery': 'Operacija', 'Suriname': 'Surinam', 'Surplus': 'Višak vrijednosti', 'Survey Answer': 'Odgovori na ankete', 'Survey Answer added': 'Dodat anketni odgovor', 'Survey Answer deleted': 'Izbrisan odgovor na istraživanje', 'Survey Answer Details': 'Detalji odgovora upitnika', 'Survey Answer updated': 'Odgovori anketa ažurirani', 'Survey Module': 'Modul istraživanja', 'Survey Name': 'Naziv istraživanja', 'Survey Question': 'Anketno pitanje', 'Survey Question added': 'Anketna pitanja dodana', 'Survey Question deleted': 'Pitanje ankete obrisano', 'Survey Question Details': 'Detalji pitanja ankete', 'Survey Question Display Name': 'Naslovno Ime pitanja ankete', 'Survey Question updated': 'Anketno pitanje ažurirano', 'Survey Section': 'Anketna sekcija', 'Survey Section deleted': 'Izbrisan odjeljak istraživanja', 'Survey Section Details': 'Detalji odjeljka ankete', 'Survey Section Display Name': 'Naslovno ime odjeljka ankete', 'Survey Section updated': 'Serija istraživanja ažurirana', 'Survey Series': 'Niz anketa', 'Survey Series added': 'Niz aketa dodan', 'Survey Series deleted': 'Serija anketa obrisana', 'Survey Series Details': 'Detalji toka ankete', 'Survey Series Name': 'Naziv niza anketa', 'Survey Series updated': 'Serija istraživanja ažurirana', 'Survey Template': 'Šablon za anketu', 'Survey Template added': 'Obrazac za Upitnik dodan', 'Survey Template deleted': 'Predložak ankete obrisan', 'Survey Template Details': 'Detalji predloška ankete', 'Survey Template updated': 'Šablon za anketu ažuriran', 'Survey Templates': 'Šabloni anketa', 'Surveys': 'Istraživanja', 'Swaziland': 'Svazilend', 'Sweden': 'Švedska', 'Switch to 3D': 'Prebaci na 3D', 'Switzerland': 'Švajcarska', 'Symbologies': 'Značenje simbola', 'Symbology': 'Značenje simbola', 'Symbology added': 'Značenje simbola dodano', 'Symbology deleted': 'Značenje simbola obrisano', 'Symbology Details': 'Detalji značenja simbola', 'Symbology removed from Layer': 'Značenja simbola uklonjena iz sloja', 'Symbology updated': 'Značenje simbola ažurirano', 'Sync Conflicts': 'Konflikti sinkronizacije', 'Sync History': 'Historija sinhronizovanja', 'Sync Now': 'Sinhroniziraj sad', 'Sync Partners': 'Sinhronizuj partnere', 'Sync Partners are instances or peers (SahanaEden, SahanaAgasti, Ushahidi, etc.) that you want to sync information with. Click on the link on the right to go the page where you can add sync partners, search for sync partners and modify them.': 'Sync Partneri su instance ili saradnici (SahanaEden, SahanaAgasti, Ushahidi, itd.) s kojima želite usklađivati ​​podatke . 
Kliknite na link na desnoj strani da odete na stranicu na kojoj možete dodati sinhronizacijskog partnera, tražiti sinhronizacijske partnere i mijenjati ih.', 'Sync Password': 'Sinhronizacija lozinki', 'Sync Policy': 'Politika sinhronizacije', 'Sync Pools': 'Sinhronizacija grupisanja', 'Sync process already started on': 'Sinhronizacija procesa već započeta ', 'Sync process already started on ': 'Proces sinhronizacije je već započeo ', 'Sync Schedule': 'Sinkronizirati raspored', 'Sync Schedules': 'Sinhronizacija rasporeda', 'Sync Settings': 'Postavke sikronizacije', 'Sync Settings updated': 'Postavke sinhronizacije su ažurirane', 'Sync Username': 'Sinhronizuj korisničko ime', 'Synchronisation': 'Sinhronizacija', 'Synchronisation - Sync Now': 'Sinhronizacija - sinhronizuj sada', 'Synchronisation History': 'Istorija sinhronizacije', 'Synchronization': 'Usklađivanje', 'Synchronization allows you to share data that you have with others and update your own database with latest data from other peers. This page provides you with information about how to use the synchronization features of Sahana Eden': 'Sinhronizacija vam omogućuje da dijelite vaše podatke sa ostalima i ažurirate vlastitu bazu podataka sa najnovijim podacima od ostalih učesnika. Ova stranica vam pruža informacije o tome kako koristiti pogodnosti Sahana Eden sinhronizacije.', 'Synchronization Conflicts': 'Sinhronizacijski konflikti', 'Synchronization currently active - refresh page to update status.': 'Sinhronizacija trenutno aktivna - osvježite stranicu da ažurirate status.', 'Synchronization Details': 'Detalji sinhronizovani', 'Synchronization History': 'Historija sinhronizacije', 'Synchronization Job': 'Posao sinhronizacije', 'Synchronization Log': 'Zapisnik sinhronizacije', 'Synchronization mode': 'Režim sinhronizacije', 'Synchronization not configured': 'Sinhronizacija nije konfigurisana', 'Synchronization not configured.': 'Sinhronizacija nije konfigurisana', 'Synchronization Peers': 'Sinhronizacijski saradnici', 'Synchronization Schedule': 'Raspored sinhronizacije', 'Synchronization Settings': 'Postavke za sinhronizaciju', 'Synchronization settings updated': 'Sinhronizacijske postavke ažurirabne', 'Syncronisation History': 'Historija sinhronizacije', 'Syria': 'Sirija', 'System keeps track of all Volunteers working in the disaster region. It captures not only the places where they are active, but also captures information on the range of services they are providing in each area.': 'Sistem prati sve volontere koji rade u području katastrofe. 
Snima ne samo mjesta gdje su aktivni , već također snima podatke u dometu usluga koje se pružaju u svakom području.', "System's Twitter account updated": 'Twitter nalog sistema je ažuriran', 'São Tomé and Príncipe': 'São Tomé i Príncipe', 'Table': 'Tabela', 'table': 'tabela', 'Table name of the resource to synchronize': 'Ime tabele s resursom za sinhronizaciju', 'Table Permissions': 'Dozvole tabele', 'table_name': 'naziv_tabele', 'Tablename': 'Ime tabele', 'Tag': 'Oznaka', 'Tag added': 'Oznaka dodana', 'Tag deleted': 'Oznaka obrisana', 'Tag Details': 'Detalji oznake', 'Tag Post': 'Stavljanje oznake', 'Tag removed': 'Oznaka uklonjena', 'Tag updated': 'Oznaka ažurirana', 'Tags': 'Oznake', 'Taiwan': 'Tajvan', 'Tajikistan': 'Tadžikistan', 'Take shelter in place or per <instruction>': 'Pronađi utočiste u mjestu ili prema <instruction>', 'tall': 'visok', 'Tanzania': 'Tanzanjia', 'Task': 'Zadatak', 'Task added': 'Dodan zadatak', 'Task deleted': 'Obrisan zadatak', 'Task Details': 'Detalji o zadatku', 'Task List': 'Lista zadataka', 'Task removed': 'Zadatak uklonjen', 'Task Status': 'Status zadatka', 'Task updated': 'Ažuriran zadatak', 'Tasks': 'Zaduženja', 'tattooed': 'tetoviran', 'Team': 'Tim', 'Team added': 'Dodan tim', 'Team deleted': 'Obrisan tim', 'Team Description': 'Opis tima', 'Team Details': 'Detalji tima', 'Team Head': 'Vođa time', 'Team ID': 'ID tima', 'Team Leader': 'Vođa tima', 'Team Member added': 'Član grupe dodan', 'Team Members': 'Članovi tima', 'Team Name': 'Naziv tima', 'Team Type': 'Tip tima', 'Team updated': 'Ažuriran tim', 'Teams': 'Timovi', 'technical failure': 'tehnički neuspjeh', 'Technical Support Vehicle': 'Vozila za tehničku podršku', 'Technical testing only, all recipients disregard': 'Samo tehničko ispitivanje, bez obzira na sve primaoce', 'Teeth': 'Zubi', 'Teeth, Dentures': 'Zubi, proteze', 'Teeth, Gaps between front teeth': 'Zubi, razmak između prednjih zuba', 'Teeth, Missing teeth': 'Zubi, nedostajući zubi', 'Teeth, Toothless': 'Zubi, bezub', 'Telecommunications': 'Telekomunikacije', 'Telephone': 'telefon', 'Telephone Details': 'Telefonski detalji', 'Telephony': 'Telefonija', 'Tells GeoServer to do MetaTiling which reduces the number of duplicate labels.': 'Naređuje GeoServeru da uradi MetaTiling što smanjuje broj dupliciranih labela.', 'Temp folder %s not writable - unable to apply theme!': 'Privremeni direktorij %s nije za pisanje - nemoguce staviti temu!', 'Template': 'Predložak', 'Template file %s not readable - unable to apply theme!': 'Datoteka predložaka %s nije čitljiva - ne može se primijeniti tema!', 'Template Name': 'Naziv predloška', 'Template Section added': 'Dodan odjeljak predloška', 'Template Section deleted': 'Obrisan odjeljak predloška', 'Template Section Details': 'Detalji odjeljka predloška', 'Template Section updated': 'Ažuriran odjeljak predloška', 'Template Sections': 'Odjeljci predloška', 'Template Summary': 'Rezime predloška', 'Templates': 'Predlošci', 'Term for the fifth-level within-country administrative division (e.g. a voting or postcode subdivision). This level is not often used.': 'Termin za peti nivo adminstrativne podjele unutar zemlje (npr. glasačko mjesto). Ovaj nivo se ne koristi često.', 'Term for the fourth-level within-country administrative division (e.g. Village, Neighborhood or Precinct).': 'Termin za administrativnu podjelu unutar zemlje na četvrtom nivou (Mjesna zajednica)', 'Term for the primary within-country administrative division (e.g. 
State or Province).': 'Termin koji se koristi za prvi nivo administrativne podjele (Entitet/Distrikt)', 'Term for the secondary within-country administrative division (e.g. District or County).': 'Termin za administrativnu podjelu drugog nivoa (Kanton/Regija)', 'Term for the third-level within-country administrative division (e.g. City or Town).': 'Termin za administrativnu jedinicu trećeg nivoa (Općina/Opština).', 'Term for the top-level administrative division (i.e. Country).': 'Termin za administrativnu podjelu najvišeg nivoa (Država)', 'Terms of Service': 'Uslovi korištenja', 'Terms of Service\n\nYou have to be eighteen or over to register as a volunteer.': 'Uvjeti korištenja\n\nMorate biti osamnaest ili preko osamnaest da bi bili volonter.', 'Terms of Service\r\n\r\nYou have to be eighteen or over to register as a volunteer.': 'Uvjeti korištenja\r\n\r\nMorate biti osamnaest ili preko osamnaest da bi bili volonter.', 'Terms of Service:': 'Uslovi korištenja:', 'Territorial Authority': 'Teritorijalni autoritet', 'Terrorism': 'Terorizam', 'Tertiary Server (Optional)': 'Tercijarni server (Nije obavezno)', 'Text': 'Tekst', 'Text before each Text Field (One per line)': 'Tekst ispred svakog tekstualnog polja (jedan po redu)', 'Text Colour for Text blocks': 'Boja teksta za tekst blokova', 'Text Direction': 'Smijer teksta', 'Text in Message:': 'Tekst u poruci:', 'Thailand': 'Tajland', 'Thank you for validating your email. Your user account is still pending for approval by the system administator (%s).You will get a notification by email when your account is activated.': 'Hvala na potvrdi svog email-a. Vaš korisnički račun čeka na odobrenje administratora (%s). Dobit ćete obavještenje email-om kad vam račun bude aktiviran.', 'Thanks for your assistance': 'Hvala na Vašoj pomoći', 'The': '!', 'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1 == db.table2.field2" results in a SQL JOIN.': '"Upit" je uslov poput "db.tablela1.polje1==\'vrijednost\'". Nešto poput "db.tabela1.polje1 == db.tabela2.polje2" kao rezultat daje SQL JOIN (spajanje).', 'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': '"Pitanje" je stanje poput "db.tablela1.polje1==\'vrijednost\'". 
Nešto poput "db.tabela1.polje1 == db.tabela2.polje2" kao rezultat daje SQL JOIN (spajanje).', 'The answers are missing': 'Nedostaju odgovori', 'The area is': 'Površina je', 'The Area which this Site is located within.': 'Podrucje u kojem se nalazi zadano mjesto', 'The Assessment Module stores assessment templates and allows responses to assessments for specific events to be collected and analyzed': 'Modul procjene čuva predloške procjene i omogućava odgovore na procjene za specifične događaje da se sakupe i analiziraju', 'The Assessments module allows field workers to send in assessments.': 'Modul procjena omogućava radnicima na terenu da pošalju procjene.', 'The asset must be assigned to a site OR location.': 'Sredstvo mora biti dodjeljeno mjestu ILI lokaciji', 'The attribute used to determine which features to cluster together (optional).': 'Atribut koji se koristi za određivanje koje se karakteristike zajedno spajaju (opciono).', 'The attribute which is used for the title of popups.': 'Atribut koji se koristi za naslove popup-a.', 'The attribute within the KML which is used for the title of popups.': 'Atribut unutar KML koji se koristi za titulu iskočnih prozora.', 'The attribute(s) within the KML which are used for the body of popups. (Use a space between attributes)': 'KML atribut(i) korišteni za tijelo iskočnih prozora. (atribute razdvojiti praznim znakom)', 'The Author of this Document (optional)': 'Autor ovog dokumenta (opcionalno)', 'The Bin in which the Item is being stored (optional).': 'Korpa u kojoj je stavka smještena (opciono)', 'The body height (crown to heel) in cm.': 'Visina (od glave do pete) u cm.', 'The Building Asssesments module allows building safety to be assessed, e.g. after an Earthquake.': 'Modul za procjenu zgrada omogućava da se ocjeni sigurnost zgrade , n.p.r. poslje zemljotresa.', 'The Camp this person is checking into.': 'Kamp u koji se ova osoba prijavljuje.', 'The Camp this Request is from': 'Kamp iz koga zahtjev potiče', 'The category of the Item.': 'Kategorija stavke', 'The client ID to use for authentication at the remote site (if required for this type of repository).': 'Klijentski ID za autentifikaciju na udaljenom sajtu (ako je potrebno za ovu vrstu repozitorija)-', 'The client secret to use for authentication at the remote site (if required for this type of repository).': 'Klijentska tajna šifra potrebna za autentifikaciju na udaljenom sajtu (ako je potrebno za ovu vrstu repozitorija).', 'The country the person usually lives in.': 'Država u kojoj osoba živi.', 'The Current Location of the Person/Group, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'Trenutna lokacija osobe/grupe, koja može biti generalna(za izvještaje) ili precizna(za prikazivanje na mapi). 
Unesite nekoliko znakova da pretražite dostupne lokacije.', 'The default Facility for which this person is acting.': 'Zadani objekt za koje data osoba djeluje.', 'The default Facility for which you are acting.': 'Podrazumjevani objekat za koji djelujete.', 'The default Organization for whom this person is acting.': 'Predefinirana organizacija za koju ova osoba djeluje.', 'The default Organization for whom you are acting.': 'Podrazumijevana organizacija za koju djelujete', 'The default policy for data import from this peer.': 'Predefinirana pravila za uvoz podataka od ovog suradnika', 'The descriptive name of the peer.': 'Opisni naziv suradnika', 'The District for this Report.': 'Geografsko područje za ovaj izvještaj', "The Donor(s) for this project. Multiple values can be selected by holding down the 'Control' key.": "Donator(i) za ovaj projekat. Mogu se odabrati višestruke vrijednosti, držanjem pritisnute 'Control' ('Ctrl') tipke.", 'The duplicate record will be deleted': 'Dupli zapis će biti obrisan', 'The Email Address to which approval requests are sent (normally this would be a Group mail rather than an individual). If the field is blank then requests are approved automatically if the domain matches.': 'E-mail adresa na koju su poslani zahtjevi za odobrenjem (obično će ovo biti grupni mail umjesto individualnog). Ako je polje prazno, onda se zahtjevi automatski odobravaju ako se domena podudara.', 'The facility where this position is based.': 'Objekat na kom je ova pozicija bazirana.', 'The first or only name of the person (mandatory).': 'Ime ili jedino ime osobe (obavezno)', 'The following %(new)s %(resource)s have been added': 'Sljedeći %(new)s %(resource)s je dodan', 'The following %(upd)s %(resource)s have been updated': 'Sljedeći %(new)s %(resource)s je ažuriran', 'The following %s have been added': 'Sljedeće %s je dodano', 'The following %s have been updated': 'Sljedeće %s je ažurirano', 'The form of the URL is http://your/web/map/service?service=WMS&request=GetCapabilities where your/web/map/service stands for the URL path to the WMS.': 'Forma URL-a je http://your/web/map/service?service=WMS&request=GetCapabilities gdje vasa your/web/map/service predstavlja URL stazu za WMS', 'The Gambia': 'Gambia', 'The Group whose members can edit data in this record.': 'Grupa čiji članovi mogu uređivati podatke u ovom zapisu', 'The hospital this record is associated with.': 'Bolnica s kojom je zapis povezan', 'The Incident Reporting System allows the General Public to Report Incidents & have these Tracked.': 'Sistem za prijavu incidenata omogućuje javnosti da prijave incidente i da ih prati.', 'The language to use for notifications.': 'Jezik korišten za napomene', 'The language you wish the site to be displayed in.': 'Jezik u kojem želite da stranica bude prikazana.', 'The last known location of the missing person.': 'Zadnja poznata lokacija za nedostajuću osobu', 'The length is': 'Dužina je', 'The level at which Searches are filtered.': 'Nivo na kom su filtrirane pretrage', 'The list of Brands are maintained by the Administrators.': 'Listu marki održavaju administratori.', 'The list of Catalogs are maintained by the Administrators.': 'Listu kataloga održavaju administratori.', 'The list of Item categories are maintained by the Administrators.': 'Lista kataloga stavki koju održavaju administratori.', 'The Location of this Site, which can be general (for Reporting) or precise (for displaying on a Map).': 'Lokacija ovog mjesta, koja može biti općenita (za izvještaje) ili precizna (za prikaz na 
mapi). ', 'The Location the Person has come from, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'Lokacija odakle osoba dolazi, koja može biti generalna (za izvještavanje) ili precizna (za prikaz na mapi). Unesite nekoliko početnih karaktera za pretragu dostupnih lokacija.', 'The Location the Person is going to, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'Lokacija na koju osoba ide, koja može biti općenita (za izvještaje) ili precizna (za prikaz na mapi). Unesi nekoliko karaktera za pretragu dostupnih lokacija.', 'The map will be displayed initially with this latitude at the center.': 'Mapa će biti prikazana inicijalno, sa ovom geografskom širinom u centru.', 'The map will be displayed initially with this longitude at the center.': 'Karta će biti prvobitno predstavljena sa ovom geografskom dužinom u centru.', 'The Maximum valid bounds, in projected coordinates': 'Maksimalne važeće granice u projektovanim koordinatama', 'The Media Library provides a catalog of digital media.': 'Media Library pruža kataloge digitalnih medija', 'The Media Library provides a catalogue of digital media.': 'Media Library pruža kataloge digitalnih medija', 'The Messaging Module is the main communications hub of the Sahana system. It is used to send alerts and/or messages using SMS & Email to various groups and individuals before, during and after a disaster.': 'Modul za slanje poruke je glavni dio za komunikaciju Sahana sistema. Koristi se za slanje upozorenja/ili poruka koristenjem SMS ili e-maila razlicitim grupama i osoboma, tokom ili poslije nepogode.', 'The minimum number of features to form a cluster.': 'Najmanji broj karakteristika za formiranje skupa.', 'The minimum number of features to form a cluster. 0 to disable.': 'Najmanji broj karakteristika za formiranje skupa. 0 za isključiti-', 'The name to be used when calling for or directly addressing the person (optional).': 'Naziv koji se koristi kada se poziva ili neposredno obraća osobi (opcionalno).', 'The next screen will allow you to detail the number of people here & their needs.': 'Sljedeći ekran će vam omogućiti da opišete broj ljudi ovdje i njihove potrebe.', 'The next screen will allow you to enter a detailed list of items and quantities, if appropriate...': 'Sljedeći ekran će vam omogućiti da unesete detaljan spisak objekata i količina, ako odgovara...', 'The number of pixels apart that features need to be before they are clustered.': 'Koliko piksela oznake karakteristika trebaju biti razdvojene prije njihovog grupisanja', 'The number of tiles around the visible map to download. Zero means that the 1st page loads faster, higher numbers mean subsequent panning is faster.': 'Broj pločica oko vidljive karte za preuzimanje. Nula znači da de 1. 
stranica učita brže, veće brojke čine da je naknadno paniranje brže.', 'The number of Units of Measure of the Alternative Items which is equal to One Unit of Measure of the Item': 'Broj mjernih jedinica alternativnih stavki koji je jednak jednoj mjernoj jedinici stavke', 'The Office this record is associated with.': 'Ured s kojom je zapis povezan', 'The Organisation which is funding this Activity.': 'Organizacija koja osniva ou aktivnost', 'The Organization Registry keeps track of all the relief organizations working in the area.': 'Registar organizacija zadržava zapise svih potpornih organizacija u radnoj oblasti.', 'The Organization this record is associated with.': 'Organizacija s kojom je zapis povezan', 'The Organization which is funding this Activity.': 'Organizacija koja osniva ovu aktivnost', 'The parse request has been submitted': 'Zahtjev za analizu je bio podnesen', 'The Patient Tracking system keeps track of all the evacuated patients & their relatives.': 'Sistem Praćenja Pacijenta prati sve evakuisane pacijente i njihove porodice.', 'The person at the location who is reporting this incident (optional)': 'Osoba na lokaciji koja prijavljuje ovaj incident (neobavezno)', 'The Person currently filling this Role.': 'Osoba koja trenutno obavlja ovu ulogu', 'The person reporting about the missing person.': 'Osoba koja je prijavila nestanak osobe', 'The person reporting the missing person.': 'Osoba koja je prijavila nestalu osobu', "The person's manager within this Office/Project.": 'Rukovodilac osobe u ovom uredu/projektu', 'The poll request has been submitted, so new messages should appear shortly - refresh to see them': 'Zahtjev za anketom je podnesen, pa bi se nove poruke uskoro trebale pojaviti - osvježite da ih vidite', 'The POST variable containing the phone number': 'POST varijabla koja sadrži telefonski broj', 'The post variable containing the phone number': 'Postavljena varijabla koja sadrži telefonski broj', 'The post variable on the URL used for sending messages': 'Post varijabla u URL koja se koristi za slanje poruka', 'The POST variable on the URL used for sending messages': 'POST varijabla u URL koja se koristi za slanje poruka', 'The post variables other than the ones containing the message and the phone number': 'Varijable objave različite od onih koje sadrže poruku i broj telefona', 'The POST variables other than the ones containing the message and the phone number': 'POST varijable različite od onih koje sadrže poruku i broj telefona', "The Project module can be used to record Project Information and generate Who's Doing What Where reports.": 'Projektni modul se može koristiti za zapis projektnih informacija i generisanje izvještaja "Ko šta radi gdje?".', 'The Project Tracking module allows the creation of Activities to meet Gaps in Needs Assessments.': 'Modul za praćenje projekta omogućuje stvaranje aktivnosti kako bi se ispunile praznine u procjeni potreba.', "The provided 'formuuid' is invalid. You have selected a Form revision which does not exist on this server.": "Navedeni 'formuuid' je nevažeći. Odabrali ste reviziju forme koja ne postoji na ovom serveru.", "The provided 'jobuuid' is invalid. The session of Form upload is invalid. You should retry uploading.": "Navedeni 'jobuuid' je nevažeći. Sesija postavljanja formulara je nevažeća. 
Trebate ponoviti postavljanje.", 'The Rapid Assessments Module stores structured reports done by Professional Organisations.': 'Modul brze procjene čuva struktuirane izvještaje koje obavljaju profesionalne organizacije', 'The Rapid Assessments Module stores structured reports done by Professional Organizations.': 'Modul brze procjene čuva struktuirane izvještaje koje obavljaju profesionalne organizacije', 'The request this record is associated with.': 'Zahtjev s kojim je ovaj zapis povezan', 'The Request this record is associated with.': 'Zahtjev s kojim je ovaj zapos povezan', 'The Role this person plays within this hospital.': 'Uloga koju ova osoba ima u ovoj bolnici.', 'The Role this person plays within this Office/Project.': 'Uloga koju ova osoba ima u ovom uredu/projektu', 'The Role to which this Role reports.': 'Uloga za koju se ova uloga izvještava.', 'The scanned copy of this document.': 'Skenirana kopija ovog dokumenta', 'The search request has been submitted, so new messages should appear shortly - refresh to see them': 'Zahtjev za pretragom je podnesen, pa bi se nove poruke ubrzo trebale pojaviti. Osvježite da ih vidite.', 'The search results are now being processed with KeyGraph': 'Rezultate pretrage trenutno obrađuje KeyGraph', 'The search results should appear shortly - refresh to see them': 'Rezultati pretrage će se uskoro pojaviti - osvježite da ih vidite', 'The serial port at which the modem is connected - /dev/ttyUSB0, etc on linux and com1, com2, etc on Windows': 'Serijski port na koji je modem priključen - npr. /dev/ttyUSB0 na linuxu i com1,com2 na Windowsu', 'The server did not receive a timely response from another server that it was accessing to fill the request by the browser.': 'Server nije primio pravovremeni odgovor od drugog servera, kojem je pristupao da bi popunio zahtjev od strane pretraživača.', 'The server received an incorrect response from another server that it was accessing to fill the request by the browser.': 'Server je dobio pogrešan odgovor od drugog servera da je pristupio popunjavanju zahtjeva od strane browsera.', 'The Shelter Registry tracks all shelters and stores basic details regarding them. It collaborates with other modules to track people associated with a shelter, the services available etc.': 'Registar skloništa prati sva skloništa i pohranjuje osnovne detalje o njima. U saradnji sa ostalim modulima prati ljude u skloništu, dostupne usluge itd.', 'The Shelter this person is checking into.': 'Sklonište u koje se prijavljuje ova osoba', 'The Shelter this Request is from': 'Sklonište iz kog je ovaj zahtjev', 'The Shelter this Request is from (optional).': 'Sklonište iz kog je ovaj zahtjev (opciono)', 'The site where this position is based.': 'Stranica na kojoj je ova pozicija bazirana.', 'The Source this information came from.': 'Izvor odakle je došla ova informacija', "The staff member's official job title": 'Zvanično radno mjesta člana osoblja', 'The staff responsibile for Facilities can make Requests for assistance.': 'Osoblje odgovorno za karakteristike može načiniti zahtjeve za pomoć.', 'The staff responsibile for Facilities can make Requests for assistance. Commitments can be made against these Requests however the requests remain open until the requestor confirms that the request is complete.': 'Osoblje zaduženo za ustanove može zahtijevati pomoć. 
Obaveze se mogu vršiti nesaglasno sa tim zahtjevima, ali oni ostaju otvoreni sve dok onaj ko je izdao zahtjev ne potvrdi da je on ispunjen.', 'The subject event no longer poses a threat or concern and any follow on action is described in <instruction>': 'Dotični događaj ne predstavlja više prijetnju niti zabrinjava i svaka slijedeća akcija je objašnjena u <instruction>', 'The subject of the alert (optional)': 'Tema ', 'The synchronization module allows the synchronization of data resources between Sahana Eden instances.': 'Sinhronizacijski modul omogućava sinhronizaciju podataka između kopija Sahana Eden.', 'The system supports 2 projections by default:': 'Sistem podržava 2 projekcije podrazumijevano:', 'The time at which the Event started.': 'Vrijeme u koje je događaj počeo.', 'The time at which the Incident started.': 'Vrijeme u koje je incident počeo.', 'The time difference between UTC and your timezone, specify as +HHMM for eastern or -HHMM for western timezones.': 'Vremenska razlika između UTC i vaše vremenske zone, navesti kao +HHMM za istočne ili -HHMM za zapadne vremenske zone.', 'The title of the page, as seen in the browser (optional)': 'Naslov stranice kakav se vidi u browser programu (opciono)', 'The token associated with this application on': 'Token povezan s ovom aplikacijom na', 'The Tracking Number %s ""is already used by %s.': 'Broj praćenja %s "" je već u upotrebi od strane %s.', 'The Unique Identifier (UUID) as assigned to this facility by the government.': 'Jedinstveni identifikator koji je pridružen ovom objektu od strane vlade', 'The unique identifier of the peer. Leave blank if the peer is no Sahana Eden instance, it will be auto-assigned in that case.': 'Jedinstveni identifikator saradnika. Ostavite prazno ako suradnik nie Sahana Eden instanca, u tom slučaju će biti automatski dodijeljeno.', 'The unique identifier which identifies this instance to other instances.': 'Jedinstveni identifikator koji razlikuje ovu instancu od ostalih.', 'The uploaded Form is unreadable, please do manual data entry.': 'Poslani formular je nečitljiv, molim obavite ručni unos podataka.', 'The URL for the GetCapabilities of a WMS Service whose layers you want accessible via the Map.': 'URL za GetCapabilities stranicu Web Map Service (WMS) čiji slojevi su dostupni na mapi.', 'The URL for the GetCapabilities page of a Web Map Service (WMS) whose layers you want available via the Browser panel on the Map.': 'URL za GetCapabilities stranicu Web Map Service (WMS) čiji slojevi su dostupni na pregledničkom panelu mape.', "The URL of the image file. If you don't upload an image file, then you must specify its location here.": 'URL datoteke slike. 
Ako ne dodate sliku, morate specificirati lokaciju ovdje.', 'The URL of your web gateway without the POST parameters': 'URL Vašeg web prolaza bez POST parametara', 'The URL of your web gateway without the post parameters': 'URL Vašeg web izlaza bez poštanskih parametara', 'The URL to access the service.': 'URL za pristup usluzi.', "The volunteer's role": 'Volonterska uloga', 'The way in which an item is normally distributed': 'Način na koji je stavka normalno distribuirana', 'The weight in kg.': 'Težina u kilogramima.', 'Theme': 'Tema', 'Theme added': 'Tema dodana', 'Theme added to Activity': 'Tema dodana u aktivnost', 'Theme added to Project': 'Tema dodana u projekat', 'Theme added to Project Location': 'Tema dodana u lokaciju projekta', 'Theme Data': 'Podaci teme', 'Theme Data deleted': 'Podaci teme obrisani', 'Theme Data updated': 'Podaci teme ažurirana', 'Theme deleted': 'Tema obrisana', 'Theme Details': 'Tematski detalji', 'Theme Layer': 'Sloj teme', 'Theme removed from Activity': 'Tema uklonjena iz aktivnosti', 'Theme removed from Project': 'Tema uklonjena iz projekta', 'Theme removed from Project Location': 'Tema uklonjena iz lokacije projekta', 'Theme updated': 'Tema ažurirana', 'Themes': 'Teme', 'There are errors': 'Postoje greške', 'There are insufficient items in the Inventory to send this shipment': 'Nema dovoljno artikala u skladištu kako bi se poslala dostava', 'There are more than %(max)s results, please input more characters.': 'Ima više od %(max)s rezultata, molim unesite više znakova.', 'There are multiple records at this location': 'Ima više zapisa na ovoj lokaciji', 'There are no contacts available for this person!': 'Nema kontakta dostupnih za ovu osobu!', "There are no details for this person yet. Add Person's Details.": 'Nema detalja za ovu osobu. Dodajte detalje osobe', 'There are not sufficient items in the Inventory to send this shipment': 'Nema dovoljno artikala u skladištu kako bi se poslala dostava', 'There are not sufficient items in the store to send this shipment': 'Nema dovoljno artikala u skladištu kako bi se poslala dostava', 'There are too many features, please Zoom In or Filter': 'Ima previše karakteristika, uvećajte sliku ili filtrirajte', 'There is insufficient data to draw a chart from the questions selected': 'Nedovoljno je podataka za iscrtavanje dijagrama iz izabranih pitanja', 'There is no address for this person yet. Add new address.': 'Još ne postoji adresa za ovu osobu. Dodaj novu adresu.', 'There is no status for this %(site_label)s yet. Add %(site_label)s Status.': 'Još nema statusa za %(site_label)s. 
Dodajte %(site_label)s tatus.', 'There was a problem, sorry, please try again later.': 'Došlo je do problema, ispričavamo se, pokušajte ponovno kasnije.', 'These are settings for Inbound Mail.': 'Ovo su postavke za Inbound Mail', 'These are the filters being used by the search.': 'Postoje filteri korišteni pretragom.', 'These are the Incident Categories visible to normal End-Users': 'Ovo su kategorije slučajeva, vidljive običnim krajnjim korisnicima.', 'These need to be added in Decimal Degrees.': 'Moraju biti uneseni u decimalnim stepenima.', 'They': 'Oni', 'thick': 'debelo', 'thin': 'tanko', 'this': 'ova', 'This adjustment has already been closed.': 'Ovo pilagođenje je već zatvoreno', 'This appears to be a duplicate of': 'Ovo je duplikat od', 'This appears to be a duplicate of ': 'Ovo izgleda kao duplikat od ', 'This email address is already in use': 'Ova email adresa je već u upotrebi', 'This email-address is already registered.': 'Ova email adresa je već registrovana', 'This file already exists on the server as': 'Ovaj fajl vec postoji na serveru kao', 'This form allows the administrator to remove a duplicate location.': 'Ovaj formular omogućava administratoru da ukloni duple lokacije.', 'This Group has no Members yet': 'Ova grupa još nema članova', 'This is appropriate if this level is under construction. To prevent accidental modification after this level is complete, this can be set to False.': 'Ovo je prikladno ako je ovaj nivo u izgradnji. Da bi se spriječile slučajne modifikacije nakon što se ovaj nivo završi, ovo se može postaviti na Netačno', 'This is normally edited using the Widget in the Style Tab in the Layer Properties on the Map.': 'Ovo se normalno uređje koristeći grafičke kontrole u kartici stilova, svojstava sloja na mapi.', 'This is required if analyzing with KeyGraph.': 'Ovo je potrebno ako se analizira s KeyGraph.', 'This is the full name of the language and will be displayed to the user when selecting the template language.': 'Ovo je puno ime jezika i bit će prikazano korisniku kada se odabira jezik predloška.', 'This is the short code of the language and will be used as the name of the file. This should be the ISO 639 code.': 'Ovo je kratko ime jezika i bit će korišteno kao ime datoteke. Ovo treba biti ISO 639 šifra,', 'This is the way to transfer data between machines as it maintains referential integrity.': 'Ovo je način za prenos podataka između mašina, jer održava referencijalni integritet.', 'This is the way to transfer data between machines as it maintains referential integrity...duplicate data should be removed manually 1st!': 'Ovo je način prenosa podataka između mašina koje održavaju referencijalni integritet... 
Duplicirane datoteke bi prvo trebalo ručno ukloniti!', "This isn't visible to the published site, but is used to allow menu items to point to the page": 'Ovo nije vidjivo objavljenom sajtu, ali se koristi da se dopusti stavkama menija da pokazuju na stranicu', "This isn't visible to the recipients": 'Ovo nije vidljivo primaocima', 'This job has already been finished successfully.': 'Ovaj posao je već uspješno završen', 'This level is not open for editing.': 'Ovaj nivo nije otvoren za izmjene.', 'This might be due to a temporary overloading or maintenance of the server.': 'Ovo može biti zbog privremenog opterećenja ili održavanja servera.', 'This module allows Inventory Items to be Requested & Shipped between the Inventories of Facilities.': 'Ovaj modul omogućava stavke iz inventara da budu zahtjevane i dostavljene između različitih objekata.', 'This module allows Warehouse Stock to be managed, requested & shipped between the Warehouses and Other Inventories': 'Ovaj modul omogućava da se zalihe skladišta održavaju, zahtijevaju i isporučuju između skladišta i drugih mjesta zaliha', 'This module allows you to manage Events - whether pre-planned (e.g. exercises) or Live Incidents. You can allocate appropriate Resources (Human, Assets & Facilities) so that these can be mobilized easily.': 'Ovaj modul omogućuje upravljanje događajima - bilo da su prethodno planirani (npr. vježbe) ili incidenti koji se trenutno odvijaju. Možete dodijeliti odgovarajuća sredstva (ljude, alate i postrojenja), tako da oni mogu biti lako mobilizirani.', 'This module allows you to plan scenarios for both Exercises & Events. You can allocate appropriate Resources (Human, Assets & Facilities) so that these can be mobilized easily.': 'Ovaj modul dopušta planiranje scenarija za vježbe i događaje. Možete alocirati prikladne resurse (ljudstvo, sredstva i objekte) tako da mogu lako mobilizirati.', 'This page shows you logs of past syncs. Click on the link below to go to this page.': 'Ova stranica prikazuje zapisnike prethodnih sinhronizacija. 
Kliknite na link ispod, kako biste ušli na ovu stranicu.', 'This resource is already configured for this repository': 'Resurs je već konfigurisan za ovaj repozitorij', 'This role can not be assigned to users.': 'Ova uloga se ne može dodijeliti korisnicima', 'This screen allows you to upload a collection of photos to the server.': 'Ovaj prozor Vam dozvoljava da uploadujete kolekciju slika na server.', 'This setting can only be controlled by the Administrator.': 'Ovo podešavanje može kontrolisati samo administrator.', 'This shipment contains %s items': 'Ova dostava sadrži %s stavki', 'This shipment contains one line item': 'Ova isporuka ima stavku od jedne linije', 'This shipment has already been received & subsequently canceled.': 'Ova isporuka je već bila primljena i odmah otkazana.', 'This shipment has already been received.': 'Ova dostava je već primljena.', 'This shipment has already been sent.': 'Ova dostava je već poslana.', 'This shipment has not been received - it has NOT been canceled because it can still be edited.': 'Ova isporuka nije poslana - NIJE otkazana zato što još uvijek može biti preuređena.', 'This shipment has not been returned.': 'Ova isporuka nije vraćena', 'This shipment has not been sent - it cannot be returned because it can still be edited.': 'Ova isporuka nije bila poslana - ne može se vratiti jer ju je još moguće mijenjati.', 'This shipment has not been sent - it has NOT been canceled because it can still be edited.': 'Ova isporuka nije poslana - NIJE otkazana zato što još uvijek može biti preuređena.', 'This shipment will be confirmed as received.': 'Ova isporuka bit će potvrđena prilikom prijema.', 'This should be an export service URL, see': 'Ovo treba biti izvozni URL servisa, vidi', 'This Team has no Members yet': 'Ovaj tim još nema članova', 'Thunderstorm': 'Grmljavina', 'Thursday': 'Četvrtak', 'Ticket': 'Kartica', 'Ticket added': 'Dodana kartica', 'Ticket deleted': 'Kartica je poništena', 'Ticket Details': 'Pojedinosti kartica', 'Ticket ID': 'ID kartice', 'Ticket updated': 'Kartica izmjenjena', 'Ticketing Module': 'Modul sa karticama', 'Tickets': 'Kartice', 'Tiled': 'popločano', 'Tilt-up concrete': 'Ispupčen beton', 'Timber frame': 'Okvir od dasaka', 'Time': 'Vrijeme', 'Time Actual': 'Stvarno vrijeme', 'Time at which data was exchanged.': 'Vrijeme u konme su podaci razmijenjeni', 'Time Estimate': 'Procjena vremena', 'Time Estimated': 'Potrebno vrijeme', 'Time Frame': 'Vremenski okvir', 'Time In': 'Vrijeme unutra', 'Time in Cache (h:m:s)': 'Vrijeme u kešu (h:m:s)', 'Time Log': 'Vremenski zapis', 'Time Log Deleted': 'Vremenski zapis izbrisan', 'Time Log Updated': 'Vremenski zapis ažuriran', 'Time Logged': 'Vrijeme prijave', 'Time needed to collect water': 'Vrijeme potrebno za sakupljanje vode', 'Time of Request': 'Vrijeme zahtjeva', 'Time Out': 'Vrijeme vani', 'Time Question': 'Vremensko pitanje', 'Time Taken': 'Potrošeno vrijeme', 'Timeline': 'Vremenska crta', 'Timeline Report': 'Izvještaj o vremenskom okviru', 'times': 'puta', 'times (0 = unlimited)': 'puta (0=neograničeno)', 'times and it is still not working. We give in. Sorry.': 'puta i još uvjek ne radi. Odustajemo. 
Žao nam je.', 'Times Completed': 'Puta završen', 'Timestamp': 'Vremenska oznaka', 'Timestamps can be correlated with the timestamps on the photos to locate them on the map.': 'Vremenske oznake se mogu povezati sa oznakama na fotografijama kako bi ih locirali na karti.', 'Title': 'Naslov', 'Title to show for the Web Map Service panel in the Tools panel.': 'Naziv koji će se prikazati za panel Usluge Web Mape u panelu Alati.', 'TMS Layer': 'TMS sloj', 'To': 'Za', 'To %(site)s': 'Za %(site)s', 'To access Sahana documentation, go to': 'Da pristupite Sahana dokumentaciji, idite na', 'to access the system': 'da pristupite sistemu', 'To begin the sync process, click the button on the right =>': 'Da se započne proces sinhronizacije, pritisnite dugme desno =>', 'To begin the sync process, click the button on the right => ': 'Da biste započeli proces sinhronizacije, kliknite na dugme desno => ', 'To begin the sync process, click this button =>': 'Da biste započeli proces sinhronizacije, pritisnite ovo dugme =>', 'To begin the sync process, click this button => ': 'Da biste započeli proces sinhronizacije, pritisnite ovo dugme => ', 'To create a personal map configuration, click': 'Da kreirate konfiguraciju lične mape, kliknite', 'To create a personal map configuration, click ': 'Za kreiranje konfiguracije lične mape, pritisnite ', 'to download a OCR Form.': 'da bi se skinula OCR forma.', 'To edit OpenStreetMap, you need to edit the OpenStreetMap settings in models/000_config.py': 'Za uređivanje OpenStreetMap, potrebno je urediti OpenStreetMap opcije u modelima/000_config.py', 'To edit OpenStreetMap, you need to edit the OpenStreetMap settings in your Map Config': 'Da uredite OpenStreetMap, trebate promijeniti OpenStreetMap postavke u konfiguraciji mape', 'To Location': 'Prema lokaciji', 'To move the Timeline: use the mouse scroll wheel, the arrow keys or grab and drag the Timeline.': 'Da pomjerite vremenski liniju: koristite točkić miša, tastere s strelicama ili grabite i prevucite vremensku liniju', 'To Organization': 'Za organizaciju', 'To Person': 'Za osobu', 'To Print or Share the Map you will have to take a screenshot. If you need help taking a screen shot, have a look at these instructions for %(windows)s or %(mac)s': 'Da štampate ili dijelite mapu trebate uzeti sliku ekrana. Ako vam treba pomoć oko uzimanja slike ekrana pogledajte instrukcije za %(windows)s ili %(mac)s', 'to reset your password': 'da resetujete lozinku', 'To search by job title, enter any portion of the title. You may use % as wildcard.': 'Za pretraživanje po nazivu posla, unesi bilo koji dio naziva. Mozete koristiti % kao džoker znak', "To search by person name, enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": "Da biste izvršili pretragu po imenu osobe, unesite bilo koje od imena, srednjih imena ili prezimena, razdvojenih razmacima. Možete koristiti % kao znak koji će zamijeniti bilo koji karakter ili niz karaktera. Pritisnite 'Traži' bez ikakvog unosa da biste izlistali sve osobe.", 'To search for a body, enter the ID ""tag number of the body. You may use ""% as wildcard.': 'Da tražite tijelo, unesite ID "" broj oznake tijela. Možete koristiti ""% kao džoker.', "To search for a body, enter the ID label of the body. You may use % as wildcard. Press 'Search' without input to list all bodies.": 'Za traženje tijela, unesite ID tag broj tijela. Možete koristiti % kao dzoker. 
Pritisnite "Trazi" bez ulaza na popis svih tijela.', "To search for a body, enter the ID tag number of the body. You may use % as wildcard. Press 'Search' without input to list all bodies.": 'Za traženje tijela, unesite ID oznaku tijela. Možete koristiti % kao dzoker. Pritisnite "Traži" bez ulaza na popis svih tijela.', "To search for a hospital, enter any of the names or IDs of the hospital, or the organisation name or acronym, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all hospitals.": "Da biste potražili bolnicu, unesite bilo koje od imena ili IDova bolnice, ime organizacije ili njen akronim razdvojeno razmakom. Možete koristiti % kao zamjenske karaktere. Pritisnite 'Pretraži' bez unesenih stavki da izlistate sve bolnice.", "To search for a hospital, enter any of the names or IDs of the hospital, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all hospitals.": "Za pretragu bolnica, unesite bilo koje od imena ili pripadni broj bolnice, sa razmacima. Možete koristiti i % umjesto razmaka. Pritisnite 'Search' (traži) i bez nabrajanja svih bolnica .", "To search for a location, enter the name. You may use % as wildcard. Press 'Search' without input to list all locations.": "Da tražite lokaciju, ukucajte ime. Možete koristiti % kao zamjenu. Pritisnite 'Search ' bez unosa da izlistate sve lokacije", "To search for a member, enter any portion of the name of the person or group. You may use % as wildcard. Press 'Search' without input to list all members.": "Da tražite člana unesite neki dio imena osobe ili grupu. Možete koristiti % kao džoker. Pritisnite 'Traži' bez unosa za prikaz svih članova.", "To search for a patient, enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all patients.": "Za pretragu pacijenta unesite ime, prezime ili srednje ime odvojene razmakom. Mozete koristiti % kao zamjenu. Pritisnite 'Pretraga' bez unesenih vrijednosti za ispis svih pacijenata.", 'To search for a person, enter any of the ""first, middle or last names and/or an ID ""number of a person, separated by spaces. ""You may use % as wildcard.': 'Da tražite osobu unesite "" prvo, srednje ime ili prezime i/ili ID "" broj osobe, razdvojen razmacima. ""Moćete koristiti % kao džoker.', "To search for a person, enter any of the first, middle or last names and/or an ID number of a person, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": 'Da biste tragali za osobom, unesite bilo koje - ime, srednje ime ili prezime i broj lične karte osobe, odvojeno razmacima. Možete koristiti znak % umjesto džokera. Pritisnite "Traži" bez ulaza da vam izlista sve osobe.', "To search for a person, enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": "Da biste izvršili pretragu po imenu osobe, unesite bilo koje od imena, srednjih imena ili prezimena, razdvojenih razmacima. Možete koristiti % kao znak koji će zamijeniti bilo koji karakter ili niz karaktera. Pritisnite 'Search' (Pretraga) bez ikakvog unosa da biste izlistali sve osobe.", "To search for a request, enter some of the text that you are looking for. You may use % as wildcard. Press 'Search' without input to list all requests.": 'Za traženje zahtjeva unesite neki tekst koji tražite. Možete koristiti % kao dzoker. 
Pritisnite "Traži" bez ulaza na popis svih tijela.', "To search for an assessment, enter any portion the ticket number of the assessment. You may use % as wildcard. Press 'Search' without input to list all assessments.": 'Za traženje procjene ukucajte bilo koji dio broja kartice za procjenu. Možete koristiti % kao dzoker. Pritisnite "Trazi" bez ulaska u cijelu listu procjena.', 'To Site': 'Za mjesto', 'To submit a new job, use the': 'Da unesete novi posao, koristite', 'To variable': 'Do varijable', 'to verify your email': 'da potvrdite vaš email', 'ton': 'tona', 'tonsure': 'ćela s vijencom kose', 'Tools': 'Alatke', 'Tools & Equipment': 'Alati i oprema', 'total': 'ukupno', 'Total': 'Ukupno', 'Total # of Beneficiaries Reached': 'Ukupno # ciljanih korisnika dosegnuto', 'Total # of households of site visited': 'Ukupan broj posjećenih domaćinstava mjesta', 'Total # of Target Beneficiaries': 'Ukupno # ciljanih korisnika', 'Total Affected': 'Ukupno oštećenih', 'Total Annual Budget': 'Ukupni godišnji budžet', 'Total Beds': 'Ukupno kreveta', 'Total Beneficiaries': 'Ukupno korisnika', 'Total Cost': 'Ukupni trošak', 'Total Cost per Megabyte': 'Ukupan trošak po megabajtu', 'Total Cost per Minute': 'Ukupni troškovi po minuti', 'Total Dead': 'Ukupno mrtvih', 'Total Funding (Local Currency)': 'Ukupni fondovi (lokalna valuta=', 'Total Funding Amount': 'Ukupan iznos fonda', 'Total gross floor area (square meters)': 'Ukupna površina poda (u kvadratnim metrima)', 'Total Households': 'Ukupan broj domaćinstava', 'Total Injured': 'Ukupno povrijeđenih', 'Total Locations': 'Ukupno lokacija', 'Total Monthly': 'Ukupno mjesečno', 'Total Monthly Cost': 'Ukupni mjesečni trošak', 'Total Monthly Cost:': 'Ukupni mjesečni trošak:', 'Total Monthly Cost: ': 'Ukupni mjesečni trošak: ', 'Total No of Affectees (Including Students, Teachers & Others)': 'Ukupan broj obuhvaćenih (uključujući učenike, nastavnike i ostale)', 'Total No of Students (Primary To Higher Secondary) in the Total Affectees': 'Ukupan broj učenika (osnovne i srednje škole) od ukupno pogođenih', 'Total No of Teachers & Other Govt Servants in the Total Affectees': 'Ukupno u nastavi i drugim vladinim uslugama u ukupnom broju pogođenih', 'Total number of beds in this facility. Automatically updated from daily reports.': 'Ukupan broj kreveta u ovom objektu. Automatski se ažurira iz dnevnih izvještaja', 'Total number of beds in this hospital. Automatically updated from daily reports.': 'Ukupan broj kreveta u ovoj bolnici. 
Automatski se ažurira iz dnevnih izvještaja.', 'Total number of houses in the area': 'Ukupan broj kuća u području', 'Total Number of Resources': 'Ukupan broj resursa', 'Total number of schools in affected area': 'Ukupni broj škola u zahvaćenim područjima', 'Total One-time Costs': 'Ukupni jednokratni troškovi', 'Total Persons': 'Ukupan broj osoba', 'Total Population': 'Ukupna populacija', 'Total population of site visited': 'Ukupan broj stanovnika posjećenog mjesta', 'Total Records: %(numrows)s': 'Ukupno zapisa: %(numrows)s', 'Total Recurring Costs': 'Ukupni ponavljajući troškovi', 'Total Unit Cost': 'Totalni jedinični trošak', 'Total Unit Cost:': 'Ukupni jedinični trošak:', 'Total Unit Cost: ': 'Ukupna cijena jedinice: ', 'Total Units': 'Ukupno jedinica', 'Total Value': 'Ukupna vrijednost', 'Totals for Budget:': 'Ukupni iznos Budžeta:', 'Totals for Bundle:': 'Ukupno po paketu:', 'Totals for Kit:': 'Ukupni iznosi za komplet:', 'Tour added': 'Tura dodana', 'Tour Configuration': 'Struktura ture', 'Tour deleted': 'Tura obrisana', 'Tour Details': 'Detalji ture', 'Tour Name': 'Ime ture', 'Tour updated': 'Tura ažurirana', 'Tour User': 'Korisnik ture', 'Tourist Group': 'Grupa turista', 'Tours': 'Ture', 'Town': 'Grad', 'Traceback': 'Praćenje', 'Traces internally displaced people (IDPs) and their needs': 'Prati ljude sa mentalnim poremećajima i njihove potrebe', 'Tracing': 'Praćenje', 'Track': 'Praćenje', 'Track deleted': 'Praćenje obrisano', 'Track Details': 'Prati detalje', 'Track Shipment': 'Prati pošiljku', 'Track updated': 'Praćenje ažurirano', 'Track uploaded': 'Praćenje učitano', 'Track with this Person?': 'Pratiti sa ovom Osobom?', 'Trackable': 'Moguće pratiti', 'Tracking and Tracing of Persons and Groups': 'Praćenje osoba i grupa', 'Tracking of basic information on the location, facilities and size of the Shelters': 'Praćenje osnovnih informacija na lokaciji, ustanova i veličine skloništa.', 'Tracking of Patients': 'Praćenje pacijenata', 'Tracking of Projects, Activities and Tasks': 'Praćenje projekata, aktivnosti i dešavanja', 'Tracks': 'Staze', 'Tracks the location, distibution, capacity and breakdown of victims in Shelters': 'Prati lokaciju, distribucije, kapacitet i podjelu žrtava u skloništima', 'Traffic Report': 'Izvještaj o prometu', 'Training': 'Obuka', 'Training added': 'Dodan trening', 'Training Course Catalog': 'Katalog o kursu treniranja', 'Training deleted': 'Obrisan trening', 'Training Details': 'Detalji treninga', 'Training Event': 'Događaj obuke', 'Training Event added': 'Događaj obuke dodan', 'Training Event deleted': 'Događaj obuke obrisan', 'Training Event Details': 'Detalji o događaju obuke', 'Training Event updated': 'Događaj obuke ažuriran', 'Training Events': 'Događaji obuke', 'Training Facility': 'Lokacija obuke', 'Training Hours (Month)': 'Sati obuke (mjesečno)', 'Training Hours (Year)': 'Sati obuke (godišnje)', 'Training Report': 'Izvještaj obuke', 'Training updated': 'Ažuriran trening', 'Trainings': 'Treninzi', 'Transfer': 'Prijenos', 'Transfer Ownership': 'Prijenos vlasništva', 'Transfer Ownership To (Organization/Branch)': 'Prebaci vlasništvo na (organizacija/ogranak)', 'Transit': 'Tranzit', 'Transit Status': 'Status tranzita', 'Transit. 
Status': 'Status tranzita', 'Transition Effect': 'Efekat tranzicije', 'Translate': 'Prevedi', 'Translated File': 'Prevedena datoteka', 'Translation': 'Prevod', 'Translation Functionality': 'Funkcionalnost prijevoda', 'Transnistria': 'Pridnjestrovska', 'Transparent?': 'Providno?', 'Transport Reference': 'Transportna referenca', 'Transportation assistance, Rank': 'Prevozna pomoć, stepen', 'Transportation Required': 'Prijevoz je potreban', 'Transported By': 'Prevoznik', 'Transported by': 'Prevoznik', 'Trauma Center': 'Centar za traume', 'Travel Cost': 'Troškovi putovanja', 'Treatments': 'Tretmani', 'Tree': 'Stablo', 'Trinidad and Tobago': 'Trinidad i Tobago', 'Tropical Storm': 'Tropska Oluja', 'Tropo Messaging Token': 'Tropo token za poruke', 'Tropo Settings': 'Tropo postavke', 'Tropo settings updated': 'Twilio postavke ažurirane', 'Tropo Voice Token': 'Tropo simbol glasa', 'Truck': 'Kamion', 'Try checking the URL for errors, maybe it was mistyped.': 'Pokušajte provjeriti greške u URL-u, možda je pogrešno napisan.', 'Try hitting refresh/reload button or trying the URL from the address bar again.': 'Pokušajte sa pritiskom na dugme za osvježavanje/ponovo učitavanje ili ponovnim unosom URL u adresnoj traci.', 'Try refreshing the page or hitting the back button on your browser.': 'Pokušajte osvježiti stranicu ili pritisnuti dugme za povratak nazad u Vašem pregledniku.', 'Tsunami': 'Cunami', 'Tuesday': 'Utorak', 'Tugboat Capacity': 'Kapacitet skele', 'Tunisia': 'Tunis', 'Turkey': 'Turska', 'turned up': 'okrenut gore', 'turning grey': 'postaje sivo', 'tweepy module not available within the running Python - this needs installing for non-Tropo Twitter support!': 'tweepy modul nije dostupan u radu sa tekućom verzijom Pythona - potrebna je instalacija non-Tropo Twitter podrške', 'Tweet deleted': 'Tweet ibrisan', 'Tweet Details': 'Tweet detalji', 'Tweeted By': 'Tweet obavio', 'Tweeted by': 'Tweet obavio', 'Tweeted on': 'Tweet datum', 'Tweeted On': 'Tweet datum', 'Twilio (Inbound)': 'Twilio (dolazni)', 'Twilio Setting added': 'Twilio postavke dodane', 'Twilio Setting deleted': 'Twilio postavke obrisane', 'Twilio Setting Details': 'Detalji Twilio postavki', 'Twilio Settings': 'Twilio postavke', 'Twilio settings updated': 'Twilio postavke ažurirane', 'Twilio SMS Settings': 'Twilio SMS postavke', 'Twilio SMS settings': 'Twilio SMS postavke', 'Twitter account updated': 'Twitter nalog ažuriran', 'Twitter ID or #hashtag': 'Twitter ID ili #hashtag', 'Twitter InBox': 'Twitter ulazni sandučić', 'Twitter Search': 'Pretraži Twitter', 'Twitter Search Queries': 'Opcije Twitter pretrage', 'Twitter Search Results': 'Rezultati Twitter pretrage', 'Twitter Settings': 'Postavke Twittera', 'Twitter Timeline': 'Twitter vremenska linija', 'Type': 'Tip', 'Type of cause': 'Tip uzroka', 'Type of Construction': 'Vrsta izgradnje', 'Type of place for defecation': 'Vrsta mjesta za obavljanje nužde', 'Type of Transport': 'Vrsta transporta', 'Type of water source before the disaster': 'Tipovi izvora vode prije nepogode', "Type the first few characters of one of the Participant's names.": 'Upiši prvih nekoliko slova imena jedne od osoba.', "Type the first few characters of one of the Person's names.": 'Upiši prvih nekoliko slova imena jedne od osoba.', "Type the name of an existing catalog item OR Click 'Create Item' to add an item which is not in the catalog.": "Navedite ime postojeće stavke kataloga ili kliknite 'Kreiraj stavku' da dodate novu stavku koja nije u katalogu.", 'Type the name of an existing catalog kit': 'Unesite ime 
postojećeg kompleta iz kataloga', "Type the name of an existing site OR Click 'Create Warehouse' to add a new warehouse.": "Kreirajte ime postojećeg mjesta ili kliknite na 'Kreiraj skladište' da dodate novo skladište.", 'Types': 'Tipovi', 'Types of health services available': 'Dostupni tipovi zdravstvene zaštite', 'Types of water storage containers available': 'Dostupni tipovi spremnika za vodu', 'UID': 'JIB', 'Ukraine': 'Ukrajina', 'UN agency': 'UN agencija', 'Un-Repairable': 'Nepopravljiv', 'Unable to find sheet %(sheet_name)s in uploaded spreadsheet': 'Ne mogu naći list %(sheet_name)s u postavljenoj tablici', 'Unable to open spreadsheet': 'Ne mogu da otvorim tablicu', 'unable to parse csv file': 'ne mogu analizirati csv datoteku', 'Unable to parse CSV file or file contains invalid data': 'Ne mogu analizirati CSV datoteku ili datoteka sadrži nevažeće podatke', 'Unable to parse CSV file!': 'Nije moguće analizirati CSV dokument !', 'unapproved': 'neodobreno', 'Unassigned': 'Nedodijeljeno', 'Uncheck all': 'Skini sve oznake', 'uncheck all': 'poništi sve oznake', 'uncovered': 'nepokriveno', 'Under which condition a local record shall be updated if it also has been modified locally since the last synchronization': 'Pod kojim uslovima bi lokalni zapisi trebali biti ažurirani ako su također lokalno mijenjani nakon zadnje sinhronizacije', 'Under which conditions local records shall be updated': 'Pod kojim uslovima bi lokalni zapisi trebali biti ažurirani', 'Understaffed': 'nema dovoljno zaposlenih', 'Unidentified': 'Neidentifikovano', 'unidentified': 'Neidentificiran', 'uninhabitable = foundation and structure destroyed': 'nenaseljivo = temeljji i strukura uništeni', 'Union Council': 'Vijeće saveza', 'Unique code': 'Jedinstveni kôd', 'Unique identifier which THIS repository identifies itself with when sending synchronization requests.': 'Jedinstevni identifikator kojim OVAJ repozitorij definiše sebe samog slanjem sinhronizacijskih zahtjeva.', 'Unique Locations': 'Jedinstvene lokacije', 'Unit': 'Jedinica', 'Unit added': 'Jedinica dodana', 'Unit Bed Capacity': 'Kapacitet kreveta po jedinici', 'Unit Cost': 'Troškovi jedinice', 'Unit deleted': 'Jedinica obrisana', 'Unit Details': 'Detalji jedinice', 'Unit of Measure': 'Jedinica mjere', 'Unit Set': 'Jedinica postavljena', 'Unit Short Code for e.g. 
m for meter.': 'Kratko ime jedinice, npr m za metar', 'Unit updated': 'Jedinica ažurirana', 'Unit Value': 'Vrijednost jedinice', 'United Arab Emirates': 'Ujedinjeni Arapski Emirati', 'United Kingdom': 'Ujedinjeno Kraljevstvo', 'United States Dollars': 'američki dolari', 'Units': 'Jedinice', 'Units of Measure': 'Mjerna jedinica', 'Unknown': 'Nepoznato', 'unknown': 'Nepoznato', 'Unknown Locations': 'Nepoznate lokacije', 'Unknown Peer': 'Nepoznati saradnik', 'Unknown question code': 'Nepoznata šifra pitanja', 'Unknown type of facility': 'Nepoznata vrsta objekta', 'unlimited': 'neograničeno', 'Unloading': 'Pražnjenje', 'Unmark as duplicate': 'Ukloni oznaku kao duplo', 'Unreinforced masonry': 'Zid bez armature', 'Unresolved Conflicts': 'Neriješeni konflikti', 'Unsafe': 'Nesiguran', 'Unselect to disable the modem': 'Uklonite oznaku da biste isključili modem', 'Unselect to disable this API service': 'Izbriši oznaku da onemogućiš ovu API uslugu', 'Unselect to disable this SMTP service': 'Poništite odabir da bi onemogućili ovu SMTP uslugu', 'Unsent': 'Nije poslano', 'Unskilled': 'Neiskusan', 'unspecified': 'nije navedeno', 'Unsubscribe': 'Otkaži pretplatu', 'Unsupported data format!': 'Nepodržan format podataka!', 'Unsupported method!': 'Nepodržana metoda!', 'unverified': 'nepotvrđeno', 'Update': 'Ažuriranje', 'update': 'ažuriraj', 'Update Activity Report': 'Ažuriraj izvještaj o aktivnostima', 'Update Base Location': 'Ažuriraj baznu lokaciju', 'Update Cholera Treatment Capability Information': 'Ažuriraj informacije o sposobnosti liječenja kolere', 'Update Coalition': 'Ažuriraj koaliciju', 'Update if Master': 'Ažuriraj ako je glavno', 'update if master': 'ažuriraj ako je glavno', 'update if newer': 'ažurirajte ako je novije', 'Update if Newer': 'Ažurirajte ako je novije', 'Update Import Job': 'Ažuriraj posao za uvoz', 'Update Location': 'Ažuriraj lokaciju', 'Update Map': 'Ažuriraj mapu', 'Update Master file': 'Ažuriraj glavnu datoteku', 'Update Method': 'Metod ažuriranja', 'Update Morgue Details': 'Ažuriraj detalje mrtvačnice', 'Update Notification': 'Ažuriraj napomen u', 'Update Policy': 'Pravila ažuriranja', 'Update Report': 'Ažuriraj izvještaj', 'Update Request': 'Ažuriraj zahtjev', 'Update Service Profile': 'Ažuriraj profil usluge', 'Update Status': 'Ažuriraj status', 'Update Task Status': 'Ažuriraj status zadatka', 'Update this entry': 'Ažuriraj ovaj unos', 'Update Unit': 'Ažuriranje jedinice', 'Update your current ordered list': 'Ažuriraj trenutni uređeni spisak', 'Update/Newer': 'Ažuriraj/novije', 'Update:': 'Ažuriraj:', 'updated': 'ažurirano', 'Updated By': 'Ažurirano od', 'updates only': 'samo ažuriranja', 'Upload': 'Pošalji', 'Upload a (completely or partially) translated CSV file': 'Postavi (djelomično ili potpuno) prevedenu CSV datoteku', 'Upload a CSV file': 'Dodaj CVS datoteku', 'Upload a CSV file formatted according to the Template.': 'Učitaj fajl formata CSV prema šablonu.', 'Upload a Question List import file': 'Postavi uvoznu datoteku s listom pitanja', 'Upload a Spreadsheet': 'Slanje proračunskih tablica (spreadsheet)', 'Upload a text file containing new-line separated strings:': 'Postavi tekstualnu datoteku koja sadrži nizove znakova razdvojene novim redovima', 'Upload an Assessment Template import file': 'Postavi uvoznu datoteku za predložak pricjene', 'Upload an image file (bmp, gif, jpeg or png), max. 300x300 pixels!': 'Učitajte sliku (bmp, gif, jpeg ili png), max. 300x300 !', 'Upload an image file (png or jpeg), max. 400x400 pixels!': 'Učitajte sliku (jpeg ili png), maks. 
400x400 !', 'Upload an image file here.': 'stavite sliku ovdje', "Upload an image file here. If you don't upload an image file, then you must specify its location in the URL field.": 'Ovdje učitajte slikovnu datoteku. Ako ne učitate slikovnu datoteku, onda morate specificirati njenu lokaciju u URL polju.', 'Upload an image, such as a photo': 'Podesi sliku,kao sto je fotografija', 'Upload Comma Separated Value File': 'Uploaduj datoteku vrijednosti odvojenih zarezom', 'Upload Completed Assessment Form': 'Postavi završen formular ocjene', 'Upload file': 'Postavi datoteku', 'Upload Format': 'Dodati format', 'Upload OCR Form': 'Pošalji OCR formu (optičko prepoznavanje karaktera)', 'Upload Photos': 'Učitaj fotografije', 'Upload Scanned OCR Form': 'Pošalji skenirani OCR formular', 'Upload Shapefile': 'Postavi datoteku s likovima', 'Upload Spreadsheet': 'Pošaljite tabelu proračuna', 'Upload the Completed Assessment Form': 'Postavi završen formular ocjene', 'Upload Track': 'Pošalji praćenje', 'Upload translated files': 'Pošalji prevedene datoteke', 'Upload Web2py portable build as a zip file': 'Pošalji Web2py portabilni sagrađen kao zip datoteka', 'Uploaded': 'Postavljeno', 'Uploaded file is not a PDF file. Provide a Form in valid PDF Format.': 'Postavljena datoteka nije PDF datoteka. Navedite formular u važećem PDF firmatu.', "Uploaded file(s) are not Image(s). Supported image formats are '.png', '.jpg', '.bmp', '.gif'.": "Postavljena datoteka ili datoteke ne predstavljaju slike. Podržani formati slika su '.png', '.jpg', '.bmp', '.gif'.", 'Uploaded PDF file has more/less number of page(s) than required. Check if you have provided appropriate revision for your Form as well as check the Form contains appropriate number of pages.': 'Postavljena PDF datoteka ima više/manje brojeva stranica nego što je potrebno. Provjerite da li ste naveli pravu reviziju za vaš formular i provjerite da li formular sadrži potreban broj stranica.', 'Urban area': 'Urbano područje', 'Urban Fire': 'gradski požar', 'Urban Tank Tactical Vehicle': 'Gradska taktička pokretna cisterna', 'Urgent': 'Hitno', 'urgent': 'hitno', 'URL for the Mobile Commons API': 'URL za Mobile Commons API', 'URL for the twilio API.': 'URL za twilio API.', 'URL of the default proxy server to connect to remote repositories (if required). If only some of the repositories require the use of a proxy server, you can configure this in the respective repository configurations.': 'URL podrazumijevanog Proxy servera za vezu s udaljenim repozitorijima (ako je potrebno). Ako samo neki repozitoriji zahtijevaju proxy server, možete to konfigurisatu.', 'URL of the proxy server to connect to the repository (leave empty for default proxy)': 'URL Proxy servera za vezu s repozitorijima (prazno za podrazumijevani proxy server)', 'URL of the Ushahidi instance': 'URL Ushahidi instance', 'URL to a Google Calendar to display on the project timeline.': 'URL za Google Calendar za prikaz projektne vremenske linije', 'URL to resume tour': 'URL da se nastavi tura', 'Uruguay': 'Urugvaj', 'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Koristite (...) & (...) za I, (...) | (...) za ILI, i ~ (...) 
za NE za izgradnju složenijih upita.', 'Use decimal': 'Koristi decimalno', 'Use default': 'Koristi podrazumjevano', 'Use default from feature class': 'Koristi podrazumijevano iz klase karakteristika', 'Use deg, min, sec': 'Koristi Stepeni Minute Sekunde', 'Use Geocoder for address lookups?': 'Koristi Geocoder za traženje adrese?', 'Use Site?': 'Da li koristiti mjesto?', 'Use these links to download data that is currently in the database.': 'Koristi ove linkove za skidanje podataka koji su trenutno u bazi.', 'Use this link to review the situation.': 'Koristite ovaj link za pregled situacije', 'Use this to set the starting location for the Location Selector.': 'Koristite ovo da podesite početnu lokaciju za Odabirač Lokacije', 'Used by IRS & Assess': 'Korišteno od strane IRS & Assess', 'Used in onHover Tooltip & Cluster Popups to differentiate between types.': 'Koristi se u onHover Tooltip & iskočnim prozorima skupova pri razlikovanju tipova', 'Used to build onHover Tooltip & 1st field also used in Cluster Popups to differentiate between records.': 'Korišteno za izgradnju onHover Tooltip i prvo polje je korišteno u iskočnim prozorima skupova za razlikovanje zapisa.', 'Used to check that latitude of entered locations is reasonable. May be used to filter lists of resources that have locations.': 'Korisit se za provjeru razumnosti geografske širine unesene lokacije. Može se koristiti za filter liste resursa koji imaju klokacije', 'Used to check that longitude of entered locations is reasonable. May be used to filter lists of resources that have locations.': 'Koristi se za provjeru geografske dužine unesenih lokacija. Može se koristiti kao filter lista izvora koje posjeduju lokacije.', 'Used to import data from spreadsheets into the database': 'Korišteno da se unesu podaci iz Tabele u bazu podataka', 'Used to populate feature attributes which can be used for Styling.': 'Korišteno za punjenje atributa karakteristika korištenih za stilove.', 'Used within Inventory Management, Request Management and Asset Management': 'Korišteno za vođenje inventara, pri upravljanju zahtjevima i upravljanju sredstvima', 'User': 'Korisnik', 'User %(id)s Logged-in': 'Korisnik %(id)s prijavljen', 'User Account': 'Korisnički nalog', 'User Account has been Approved': 'Korisnički nalog je potvrđen', 'User Account has been Disabled': 'Korisnički račun je onemogućen', 'User added': 'Korisnik dodan', 'User added to Role': 'Korisnik dodan u ulogu', 'User already has this role': 'Korisnik već ima datu ulogu', 'User already in Group!': 'Korisnik već u grupi', 'User deleted': 'Korisnik obrisan', 'User Details': 'Korisnički detalji', 'User Guidelines Synchronization': 'Sinhronizacija korisničkih smjernica rada', 'User has been (re)linked to Person and Human Resource record': 'Korisnik je ponovo vezan za zapis o osoblju i ljudskim resursima.', 'User has no Email address!': 'Korisnik nema e-mail adrese!', 'User has no SMS address!': 'Korisnik nema SMS adrese!', 'User ID': 'Korisnički ID', 'User Management': 'Upravljanje korisnicima', 'User Profile': 'Korisnički profil', 'User Requests': 'Korisnički zahtjevi', 'User Roles': 'Uloge korisnika', 'User Updated': 'Ažuriran korisnik', 'User updated': 'Korisnik ažuriran', 'User with Role': 'Korisnik s ulogom', "User's role": 'Korisnička uloga', 'Username': 'Korisničko ime', 'Username & Password': 'Korisničko ime i lozinka', 'Username to use for authentication at the remote site.': 'Korisničko ime za prijavu na udaljeni sajt.', 'Users': 'Korisnici', 'Users in my Organizations': 'Korisnici u 
mojim organizacijama', 'Users removed': 'Korisnici uklonjeni', 'Users with this Role': 'Korisnici s ovom ulogom', 'Uses the REST Query Format defined in': 'Koristi REST format upita definiran u', 'Ushahidi': 'Ushahidi', 'Ushahidi Import': 'Uvoz iz Ushahidi', 'using default': 'koristim podrazumijevani', 'Usual food sources in the area': 'Ukobičajen izvor hrane u području', 'UTC Offset': 'UTC pomak', 'Utilities': 'Usluge', 'Utility, telecommunication, other non-transport infrastructure': 'Uslužne, telekomunikacijske i ostale netransportne infrastrukture', 'Utilization Details': 'Detalji upotrebe', 'Utilization Report': 'Izvještaj o upotrebi', 'UUID of foreign Sahana server': 'UUID udaljenog Sahana servera', 'Valid': 'važeće', 'Valid From': 'Važi od', 'Valid Until': 'Važi do', 'Value': 'Vrijednost', 'Value per Pack': 'Vrijednost po paketu', 'Various Reporting functionalities': 'Razne funkcionalnosti izvještaja', 'Vatican City': 'Vatikan', 'VCA (Vulnerability and Capacity Assessment)': 'VCA (Procjena ranjivosti i kapaciteta)', 'Vehicle': 'Vozilo', 'Vehicle added': 'Dodano vozilo', 'Vehicle assigned': 'Vozilo dodijeljeno', 'Vehicle Assignment updated': 'Dodjela osoblja ažurirana', 'Vehicle Assignments': 'Dodjele vozila', 'Vehicle Categories': 'Kategorije vozila', 'Vehicle Category': 'Kategorija vozila', 'Vehicle Crime': 'Zločin s vozilima', 'Vehicle deleted': 'Obrisano vozilo', 'Vehicle Details': 'Detalji o vozilu', 'Vehicle Details added': 'dodani detalji vozila', 'Vehicle Details deleted': 'obrisani detalji vozila', 'Vehicle Details updated': 'ažurirani detalji vozila', 'Vehicle Management': 'Vođenje vozila', 'Vehicle Plate Number': 'Registarski broj vozila', 'Vehicle Type': 'Vrsta vozila.', 'Vehicle Type added': 'Vrsta vozila dodana', 'Vehicle Type deleted': 'Vrsta vozila obrisana', 'Vehicle Type Details': 'Detalji o vrsti vozila', 'Vehicle Type updated': 'Vrsta vozila ažurirana', 'Vehicle Types': 'Vrste vozila', 'Vehicle unassigned': 'Vozilo nedodijeljeno', 'Vehicle updated': 'Ažurirano vozilo', 'Vehicles': 'Vozila', 'Vehicles are assets with some extra details.': 'Vozila su sredstva sa nekim dodatnim detaljima', 'Vendor': 'Proizvođač', 'Venezuela': 'Venecuela', 'Venue': 'Mjesto održavanja', 'Verification Status': 'Status provjere', 'verified': 'provjereno', 'Verified': 'Potvrđeno', 'Verified?': 'Potvrđeno?', 'Verify password': 'Potvrdite lozinku', 'Verify Password': 'Potvrdite lozinku', 'Version': 'Verzija', 'vertical': 'vertikalno', 'Very Good': 'Veoma dobro', 'Very High': 'Veoma visok', 'Very Strong': 'Veoma jako', 'Vessel Max Length': 'Max. 
dužina čamca', 'Victim': 'Žrtva', 'Video Tutorials': 'Video lekcije', 'Vietnam': 'Vijetnam', 'View': 'Pogled', 'view': 'pogled', 'View & Edit Pledges': 'Pregled/Uređivanje ponuda za pomoć', 'View Alerts received using either Email or SMS': 'Pregledaj upozorenja primljena korištenjem Email-a ili SMS-a', 'View All': 'Prikaži sve', 'View all log entries': 'Pogledaj sve unose u zapisniku', 'View All Tickets': 'Pogledaj sve kartice', "View and/or update details of the person's record": 'Prikažite i/ili ažurirajte detalje zapisa za ovu osobu', 'View and/or update their details': 'Prikažite i/ili ažurirajte njihove detalje', 'View as Pages': 'Pogledaj kao stranice', 'View Email Accounts': 'Pogledaj naloge elektronske pošte', 'View Email InBox': 'Pogledaj E-mail dolazne poruke', 'View Error Tickets': 'Pregledati kartice grešaka', 'View full screen': 'Pogledaj preko cijelog ekrana', 'View Fullscreen Map': 'Vidi mapu cijelog ekrana', 'View Image': 'Pogledaj sliku', 'View InBox': 'Pogledaj dolazne poruke', 'View Items': 'Prikaz stavki', 'View Location Details': 'Pogledaj detalje lokacije', 'View log entries per repository': 'Pogledaj stavke zapisnika po repozitoriju', 'View Message Log': 'Prikaži zapisnik poruka', 'View Mobile Commons Settings': 'Pogledaj mobilne postavke', 'View On Map': 'Pogledaj na mapi', 'View on Map': 'Pogledaj na Mapi', 'View or update the status of a hospital.': 'Pregledanje ili ažuriranje statusa bolnice.', 'View Outbox': 'Pogledaj izlazno sanduče', 'View Parser Connections': 'Pogledaj parserske konekcije', 'View pending requests and pledge support.': 'Pregled zahtjeva na čekanju i ponuda podrške', 'View Picture': 'Pogledaj sliku', 'View Queries': 'Pogledaj upite', 'View Requests & Pledge Aid': 'Pogledaj zahtjeve i ponude za pomoć', 'View Requests for Aid': 'Pogledaj zahtjeve za pomoć', 'View Results of completed and/or partially completed assessments': 'Pogledaj rezultat završenih i/ili polovišno završenih procjena', 'View RSS Posts': 'Pogledaj RSS poruke', 'View RSS Settings': 'Pogledaj RSS Postavke', 'View Sender Priority': 'Pogledaj prioritet pošiljaoca', 'View Sent Emails': 'Pogledaj poslane E-mail poruke', 'View Sent SMS': 'Pogledaj poslane SMS poruke', 'View Sent Tweets': 'Pogledaj poslane Tweet poruke', 'View Settings': 'Prikaz postavki', 'View SMS InBox': 'Pogledaj SMS dolazne poruke', 'View SMS OutBox': 'Pogledaj SMS odlazne poruke', 'View Test Result Reports': 'Pogledaj izještaj o rezultatima testiranja', 'View the hospitals on a map.': 'Pogledaj bolnice na mapi.', 'View the module-wise percentage of translated strings': 'Pogledaj procenat prevedenosti stringova po modulu', 'View Tickets': 'Vidi kartice', 'View Translation Percentage': 'Pogledaj procenat prijevoda', 'View Tweet': 'Pogledaj tweet', 'View Twilio Settings': 'Pogledaj Twilio postavke', 'View Twitter InBox': 'Pogledaj Twittwe dolazne poruke', 'View/Edit Person Details': 'Pogledaj/uredi detalje osobe', 'View/Edit the Database directly': 'Pogledaj/Uredi Bazu podataka direktno', "View/Edit the Database directly (caution: doesn't respect the framework rules!)": 'Pogledaj/uredi bazu podataka direktno (Upozorenje: nepoštivanje okvirnih pravila!)', 'Village': 'Selo', 'Village / Suburb': 'Selo / predgrađe', 'Village Leader': 'Vođa sela', 'Visible?': 'Vidljivo?', 'Visual Recognition': 'Vizuelno prepoznavanje', 'vm_action': 'vm_akcija', 'Volcanic Ash Cloud': 'Oblak vulkanskog pepela', 'Volcanic Event': 'Vulkanski događaj', 'Volume (m3)': 'Zapremina (m3)', 'Volume - Fluids': 'Sadržaj - tečnosti', 'Volume - 
Solids': 'Sadržaj - čvrsti', 'Volume/Dimensions': 'Sadržaj/Dimenzije', 'Voluntarios': 'Volonterski', 'Volunteer': 'Volonter', 'volunteer': 'volonter', 'Volunteer added': 'Volonter dodan', 'Volunteer Availability': 'Dostupnost volontera', 'Volunteer availability added': 'Dodana dostupnost volontera', 'Volunteer availability deleted': 'Dostupnost volontera obrisana', 'Volunteer availability updated': 'Ažurirana dostupnost volontera', 'Volunteer Cluster': 'Skup volontera', 'Volunteer Cluster added': 'Skup volontera dodan', 'Volunteer Cluster deleted': 'Skup volontera obrisan', 'Volunteer Cluster Position': 'Pozicija skupa volontera', 'Volunteer Cluster Position added': 'Pozicija skupa volontera dodana', 'Volunteer Cluster Position deleted': 'Pozicija skupa volontera obrisana', 'Volunteer Cluster Position updated': 'Pozicija skupa volontera ažurirana', 'Volunteer Cluster Type': 'Tip skupa volontera', 'Volunteer Cluster Type added': 'Vrsta skupa volontera dodana', 'Volunteer Cluster Type deleted': 'Vrsta skupa volontera obrisana', 'Volunteer Cluster Type updated': 'Vrsta skupa volontera ažurirana', 'Volunteer Cluster updated': 'Skup volontera ažuriran', 'Volunteer Contact': 'Kontakt volontera', 'Volunteer Data': 'Podaci o volonterima', 'Volunteer deleted': 'Volonter obrisan', 'Volunteer Details': 'Detalji o volonteru', 'Volunteer details updated': 'Detalji o volonterima ažurirani', 'Volunteer Details updated': 'Volonter ažuriran', 'Volunteer Hours': 'Volonterski sati', 'Volunteer ID': 'ID volontera', 'Volunteer Information': 'Informacije Volontera', 'Volunteer Management': 'Koordinacija volontera', 'Volunteer Project': 'Volonterski projekat', 'Volunteer Record': 'Volonterski zapis', 'Volunteer Report': 'Izvještaj o volonterima', 'Volunteer Request': 'Volonterski zahtjev', 'Volunteer Role': 'Volonterska uloga', 'Volunteer Role added': 'Uloga volontera dodana', 'Volunteer Role Catalog': 'Katalog volonterskih uloga', 'Volunteer Role deleted': 'Uloga volontera obrisana', 'Volunteer Role Details': 'Detalji volonterske uloge', 'Volunteer Role updated': 'Uloga volontera ažurirana', 'Volunteer Service Record': 'Izvještaj o volonterskoj usluzi', 'volunteers': 'volonteri', 'Volunteers': 'Volonteri', 'Volunteers were notified!': 'Volonteri su obavješteni!', 'Vote': 'Glasati', 'Votes': 'Glasovi', 'Vulnerability Document': 'Dokument o ranjivosti', 'Walking Only': 'Samo hodanje', 'Walking time to the health service': 'Potrebno vrijeme hoda do zdravstvenih usluga', 'Wall or other structural damage': 'Zid ili druga strukturna oštećenja', 'Warehouse': 'Skladište', 'Warehouse added': 'Skladište dodano', 'Warehouse deleted': 'Skladište obrisano', 'Warehouse Details': 'Detalji o skladištu', 'Warehouse Item added': 'Dodata stavka skladišta', 'Warehouse Item deleted': 'Stavka skladišta obrisana', 'Warehouse Item Details': 'Detalji o stavci skladišta', 'Warehouse Items': 'Stavke skladišta', 'Warehouse Management': 'Upravljanje skladištem', 'Warehouse Stock': 'Zaliha u skladištu', 'Warehouse Stock Details': 'Detalji o zalihi skladišta', 'Warehouse Stock Report': 'Izvještaj zaliha u skladištu', 'Warehouse Stock updated': 'Ažurirana stavka skladišta', 'Warehouse updated': 'Skladište ažurirano', 'Warehouse/Sites Registry': 'Registar skladiša/mjesta', 'Warehouses': 'Skladišta', 'Warehousing Storage Capacity': 'Kapacitet skladišta', 'WARNING': 'UPOZORENJE', 'WASH': 'OPERI', 'Water': 'Voda', 'Water collection': 'Skupljanje vode', 'Water gallon': 'Kanister vode', 'Water Sanitation Hygiene': 'Higijena sanitacije vode', 'Water 
storage containers in households': 'Kontejneri za vodu u domaćinstvima', 'Water storage containers sufficient per HH': 'Kontejneri za vodu dovoljni za domaćinsto', 'Water Supply': 'Dostava vode', 'Water supply': 'Dostava vode', 'Waterspout': 'Vodena pijavica', 'WatSan': 'WatSan', 'wavy': 'valovito', 'Way Bill(s)': 'Putni troškovi', 'Waybill': 'Tovarni list', 'WAYBILL': 'TOVARNILIST', 'Waybill Number': 'Broj tovarnog lista', "We have no active problem. That's great!": 'Nemamo aktivnog problema. Odlično!', 'We have tried': 'Pokušavali smo', 'Weak': 'Slabo', 'Web API settings updated': 'Web API postavke ažurirane', 'Web Form': 'Web formular', 'Web Map Service Browser Name': 'Ime usluge za pregled mape preko web-a', 'Web Map Service Browser URL': 'URL izbornika Web Map servisa', 'Web2py executable zip file found - Upload to replace the existing file': 'Web2py izvršna zip datoteka nađena - Pošaljite da zamijenite postojeću datoteku', 'Web2py executable zip file needs to be uploaded first to use this function.': 'Web2py izvršna zip datoteka treba da se prvo pošalje da bi ste koristili ovu funkcionalnost', 'Web2py executable zip file needs to be uploaded to use this function.': 'Web2py izvršna zip datoteka treba da se prvo pošalje da bi ste koristili ovu funkcionalnost', 'Website': 'Web stranica', 'Wednesday': 'Srijeda', 'Week': 'Sedmica', 'Weekends only': 'Samo vikendima', 'weekly': 'sedmično', 'Weekly': 'Sedmično', 'Weight': 'Težina', 'Weight (kg)': 'Težina (kg)', 'Welcome to the Sahana Eden Disaster Management System': 'Dobrodošli na Sahana Eden, sistem za upravljanje u slučaju katastrofa', 'Welcome to the Sahana FOSS Disaster Management System': 'Dobrodošli na Sahana FOSS, sistem za upravljanje u slučaju katastrofa', 'Welcome to the Sahana Portal at': 'Dobrodošli na Sahana Portal u', 'Well-Known Text': 'Dobro poznat tekst', 'Were breast milk substitutes used prior to the disaster?': 'Da li su korištene zamjene za majčino mlijeko prije katastrofe?', 'WFS Layer': 'WFS sloj', 'WGS84 (EPSG 4236) is required for many WMS servers.': 'WGS84 (EPSG 4236) je potreban za mnoge WMS servere', 'What are the factors affecting school attendance?': 'Koji faktori koji utiču na pohađanje škole', 'What are your main sources of cash to restart your business?': 'Koji su vaši glavni izvori novca za ponovni početak posla?', 'What are your main sources of income now?': 'Koji su vaši izvori primanja sada?', 'What do you spend most of your income on now?': 'Na šta sada trošite najveći dio prihoda?', 'What food stocks exist? (main dishes)': 'Koje zalihe hrane postoje (glavne namirnice)=', 'What food stocks exist? 
(side dishes)': 'Koje zalihe hrane postoje (dodatne namirnice)', 'What is your major source of clean water for daily use (ex: washing, cooking, bathing)?': 'Koji je glavni izvor čiste vode za svakodnevnu upotrebu (pranje, kuhanje, kupanje=', 'What is your major source of drinking water?': 'Koji je glavni izvor pitke vode?', 'What order to be contacted in.': 'Redoslijed kontaktiranja.', "What should be done to reduce women and children's vulnerability to violence?": 'Šta uraditi za smanjiti ranjivost žena i djece zbog nasilja?', 'What the Items will be used for': 'Za što će se koristiti ove stavke?', 'What type of latrines are available in the village/IDP centre/Camp?': 'Koji tip zahoda je dostupan u selu/centru/kampu?', 'What type of salvage material can be used from destroyed houses?': 'Koji tip spašenog materijala se može koristiti iz uništenih kuća', 'What type of salvage material can be used from destroyed schools?': 'Koji tip spašenog materijala se može koristiti iz uništenih škola', 'What types of health problems do children currently have?': 'Koju vrstu zdravstvenih problema djeca trenutno imaju?', 'What types of household water storage containers are available?': 'Koji tipovi kućnih spremnika za vodu su dostupni?', 'What were your main sources of income before the disaster?': 'Koji su bili vaši izvori primanja prije nepogode', 'Wheat': 'Žito', 'When reports were entered': 'Kada su izvještaji uneseni', "When syncing data with others, conflicts happen in cases when two (or more) parties want to sync information which both of them have modified, i.e. conflicting information. Sync module tries to resolve such conflicts automatically but in some cases it can't. In those cases, it is up to you to resolve those conflicts manually, click on the link on the right to go to this page.": 'Kada se podaci usklađuju, dolazi do konflikta u slučaju kada dvije (ili više) stranaka želi da sinhronizira informacije koje su izmjenili, tj. protivne informacije. Sync modul pokušava riješiti ovakve konflikte ali ne uspjeva u nekim slučajevima. 
Tada je do Vas da riješite konflikte ručno, kliknite na link sa desne strane koji će Vas uputiti na tu stranicu.', 'When this search was last checked for changes.': 'Kada je ova pretraga zadnji put provjerena za promjene.', 'Where are the alternative places for studying?': 'Gdje su alternativna mjesta za studiranje', 'Where are the separated children originally from?': 'Koje je porijeklo odvojene djece?', 'Where do the majority of people defecate?': 'Gdje većina ljudi vrši nuždu?', 'Where have the children been sent?': 'Gdje su djeca poslana?', 'Where is solid waste disposed in the village/camp?': 'Gdje se smeće ostavlja u selu/kampu?', 'Where reached': 'Gdje je dosegnut', 'Whether calls to this resource should use this configuration as the default one': 'Da li bi pozivi na ovaj resurs trebali koristiti ovu konfiguraciju kao podrazumijevanu', 'Whether the Latitude & Longitude are inherited from a higher level in the location hierarchy rather than being a separately-entered figure.': 'Da li su geografska širina i dužina naslijeđeni iz višeg nivoa u hijerarhiji lokacija, umjesto da su posebno navedeni.', 'Whether the resource should be tracked using S3Track rather than just using the Base Location': 'Da li se resurs treba pratiti koristeći S3Track umjesto da se samo koriste iz bazne lokacije', 'Whether this is a Sahana Eden, Sahana Agasti, Ushahidi or Other instance.': 'Da li je ovo kopija Sahana Eden, Sahana Agasti, Ushahidi ili drugo', 'Which methods to apply when importing data to the local repository': 'Koje metode primijeniti pri uvozu podataka u lokalni repozitorij', 'Whiskers': 'Brkovi', 'white': 'bijela', 'Whitelist a Sender': 'Stavi pošiljaoca na bijelu listu', 'Whitelisted Senders': 'Pošiljaoci na bijeloj listi', 'Who is doing what and where': 'Ko šta radi i gdje', 'Who is doing What Where': 'Ko šta radi i gdje', 'Who usually collects water for the family?': 'Ko obično u porodici sakuplja vodu?', 'wide': 'širok', 'wider area, longer term, usually contain multiple Activities': 'veće područje, na duže vrijeme, obično sadrži više aktivnosti', 'widowed': 'udovac/udovica', 'Width': 'širina', 'Width (m)': 'Širina (m)', 'Wikipedia': 'Wikipedia', 'Wild Fire': 'Požar', 'Will be filled automatically when the Item has been Repacked': 'Bit će popunjeno automatski pri prepakovanju stavke', 'Will create and link your user account to the following records': 'Kreiraće i povezati korisnički nalog s sljedećim zapisima', 'Wind Chill': 'Hladni Vjetar', 'window': 'prozor', 'Window frame': 'Okvir prozora', 'windows broken, cracks in walls, roof slightly damaged': 'prozori razbijeni, pukotine u zidovima, krov blago oštećen', 'Winter Storm': 'Zimska oluja', 'within human habitat': 'unutar prebivališta', 'WKT is Invalid!': 'WKT nije validan', 'WMS Browser URL': 'URL za WMS pretraživač', 'WMS Layer': 'WMS sloj', 'Women of Child Bearing Age': 'Žena u reproduktivnom dobu', 'Women participating in coping activities': 'Žene učesnici u aktivnostima prilagođavanja', 'Women who are Pregnant or in Labour': 'Trudnice i porodilje', 'Womens Focus Groups': 'Ženske fokus grupe', 'Wooden plank': 'drvena daska', 'Wooden poles': 'Drveni stubovi', 'Work': 'Posao', 'Work on Program': 'Rad na programu', 'Work phone': 'Telefon na poslu', 'Working hours end': 'Radna satnica završena', 'Working hours start': 'Početak radnog vremena', 'Working or other to provide money/food': 'Radi, ili nešto drugo, da bi osigurao novac/hranu', 'Would you like to display the photos on the map?': 'Želite li prikazati fotografije na mapi', 'X-Ray': 
'X-zraci', 'xlwt module not available within the running Python - this needs installing for XLS output!': 'xlwt modul nije dostupan uz tekuću verziju Pythona - to se treba instalirati za XLS izlaz!', 'xlwt module not available within the running Python - this needs installing to do XLS Reporting!': 'Modul xlwt nije dostupan unutar pokrenutog Python-a, ovo zahtijeva instalaciju XLS izvještaja!', 'xlwt not installed, so cannot export as a Spreadsheet': 'xlwt nije instaliran pa ne mogu izvesti kao tablicu', 'XSL Template Not Found:': 'XSL šablon nije pronađen:', 'XSL Transformation Error:': 'Greška u XSL transformaciji', 'XSLT Template Not Found:': 'XSLT šablon nije pronađen:', 'XSLT Transformation Error:': 'Greška u XSLT transformaciji', 'XYZ Layer': 'XYZ sloj', "Yahoo Layers cannot be displayed if there isn't a valid API Key": 'Yahoo slojevi ne mogu biti prikazani ako ne postoji ispravan API ključ', 'Year': 'Godina', 'Year built': 'Godina izgradnje', 'Year of Manufacture': 'Godina proizvodnje', 'Year that the organization was founded': 'Godina osnivanja organizacije', 'Yellow': 'Žuta', 'Yemen': 'Jemen', 'YES': 'DA', 'yes': 'da', 'Yes': 'Da', 'Yes, No': 'Da , ne', "Yes, No, Don't Know": 'Da, Ne, Ne znam', 'You are a recovery team?': 'Vi ste ekipa za sanaciju?', 'You are attempting to delete your own account - are you sure you want to proceed?': 'Pokušavate izbrisati svoj vlastiti račun - da li ste sigurni da želite da nastavite?', 'You are currently reported missing!': 'Vi ste trenutno prijavljeni kao nestali!', 'You can change the configuration of synchronization module in the Settings section. This configuration includes your UUID (unique identification number), sync schedules, beacon service and so on. Click the following link to go to the Sync Settings page.': 'Možete promijeniti konfiguraciju modula sinhronizacije u sekciji Postavke. Ova konfiguracije uključuje vaš UUID (jedinstveni indentifikacijski broj), sinhronizaciju rasporeda, upravljački servis itd. Idite na sljedeći link da biste otišli na stranicu Postavke sinhronizacije.', 'You can click on the map below to select the Lat/Lon fields': 'Pritisnite na mapu ispod da selektirate Lat/Lon polja', 'You can only make %d kit(s) with the available stock': 'Možete napraviti %d komplet(a) s dostupnom zalihom', "You can search by asset number, item description or comments. You may use % as wildcard. Press 'Search' without input to list all assets.": "Možete tražiti po broju sredstva, opisu stavke ili komentarima. Možete koristi % kao džoker. Pritisnite 'Traži' bez unosa za spisak svih sredstava.", "You can search by by group name, description or comments and by organization name or acronym. You may use % as wildcard. Press 'Search' without input to list all.": "Možete tražiti po imenu grupe, opisu ili komentarima i po imenu organizacije ili akronimu. Možete koristiti % kao džoker. Pritisnite 'Traži' bez unosa za prikaz svega.", "You can search by course name, venue name or event comments. You may use % as wildcard. Press 'Search' without input to list all events.": "Možete tražiti po imenu kursa, mjestu održavanja ili komentarima događaja. Možete koristiti % kao džoker. Pritisnite 'Traži' bez unosa za prikaz svih događaja-", "You can search by description. You may use % as wildcard. Press 'Search' without input to list all incidents.": "Možete tražiti po opisu. Možete koristiti % kao džoker. 
Pritisnite 'Traži' bez unosa za prikaz svih incidenata.", "You can search by job title or person name - enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": "Možete tražiti po radnom mjestu ili ličnom imenu, unesite ime, prezime ili srednje ime razdvojeno razmacima. Možete koristiti % kao džoker. Pritisnite 'Traži' bez unosa za prikaz svih osoba.", 'You can search by name, acronym or comments': 'Možete tražiti po imenu, akronimu ili komentarima', 'You can search by name, acronym, comments or parent name or acronym.': 'Možete tražiti po imenu, akronimu, komentarima ili imenu/akronimu nadređenog zapisa.', "You can search by person name - enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": "Možete tražiti po ličnom imenu, unesite ime, prezime ili srednje ime razdvojeno razmacima. Možete koristiti % kao džoker. Pritisnite 'Traži' bez unosa za prikaz svih osoba.", "You can search by trainee name, course name or comments. You may use % as wildcard. Press 'Search' without input to list all trainees.": "Možete tražiti po imenu kursiste, imenu kursa ili komentarima. Možete koristiti % kao džoker. Pritisnite 'Traži' bez unosa za prikaz svih kursista.", 'You can select an area on the image and save to crop it.': 'Možete odabrati područje slike za njeno snimanje i izrezivanje.', 'You can select the Draw tool': 'Možete odabrati alat za crtanje', 'You can select the Draw tool (': 'Možete odabrati alat za crtanje (', 'You can set the modem settings for SMS here.': 'Možete postaviti postavke modema za SMS ovdje.', 'You can use the Conversion Tool to convert from either GPS coordinates or Degrees/Minutes/Seconds.': 'Možete koristiti sredstvo za konverziju kako bi pretvorili iz GPS koordinata ili iz Stepeni/Minute/Sekunde.', 'You do no have permission to cancel this received shipment.': 'Nemate dozvolu da otkažete primljenu isporuku.', 'You do no have permission to cancel this sent shipment.': 'Nemate dozvolu da otkažete ovu poslanu pošiljku.', 'You do no have permission to make this commitment.': 'Ne posjedujte dozvolu da se obavežete za takvo nešto', 'You do no have permission to receive this shipment.': 'Nemate dozvolu da primite ovu pošiljku.', 'You do no have permission to send this shipment.': 'Nemate dozvolu da pošaljete ovu pošiljku', 'You do not have permission for any facility to add an order.': 'Nemate odobrenja ni za jedan objekat da dodate narudžbu.', 'You do not have permission for any facility to make a commitment.': 'Nemate dozvolu za angažovanje za neki objekt.', 'You do not have permission for any facility to make a request.': 'Nemate dozvolu za podnošenje zahtjeva bilo kojem objektu.', 'You do not have permission for any facility to perform this action.': 'Nemate odobrenja ni za jedan objekat da obavite ovu akciju.', 'You do not have permission for any facility to receive a shipment.': 'Nemate dozvolu ni za jedan objekat da primite pošiljku.', 'You do not have permission for any facility to send a shipment.': 'Nemate dozvolu da pošaljete pošiljku bilo kojem objektu.', 'You do not have permission for any organization to perform this action.': 'Nemate odobrenja ni za jednu organizaciju da obavite ovu akciju', 'You do not have permission for any site to add an inventory item.': 'Nemate dozvolu da dodate stavku inventara ni na jednom mjestu', 'You do not have permission for any site to receive a shipment.': 
'Nemate dozvolu da ijedna lokacija dobije pošiljku', 'You do not have permission for any site to send a shipment.': 'Nemate dozvolu za bilo koju stranicu za slanje pošiljke', 'You do not have permission to adjust the stock level in this warehouse.': 'Nemate odobrenja da prilagodite nivo zalihe za ovo skladište', 'You do not have permission to cancel this received shipment.': 'Nemate dozvolu da otkažete primljenu isporuku.', 'You do not have permission to cancel this sent shipment.': 'Nemate dozvolu da otkažete ovu poslanu pošiljku.', 'You do not have permission to make this commitment.': 'Ne posjedujte dozvolu da napravite ovo zaduženje', 'You do not have permission to receive this shipment.': 'Nemate dozvolu da primite ovu pošiljku.', 'You do not have permission to return this sent shipment.': 'Nemate dozvolu da vratite ovu poslanu pošiljku.', 'You do not have permission to send a shipment from this site.': 'Nemate dozvolu da šaljete pošiljku sa ovog mjesta.', 'You do not have permission to send messages': 'Nemate dozvolu da pošaljete poruke', 'You do not have permission to send this shipment.': 'Nemate dozvolu da pošaljete ovu pošiljku', 'You have a personal map configuration. To change your personal configuration, click': 'Imate ličnu konfiguraciju mape. Ukoliko želite promjeniti ličnu konfiguraciju, kliknite', 'You have a personal map configuration. To change your personal configuration, click ': 'Imate ličnu konfiguraciju mape. Za promjenu, kliknite ', 'You have committed for all people in this Request. Please check that all details are correct and update as-required.': 'Zadužili ste za sve ljude u ovom zahtjevu. Molim provjerite da su svi detalji ispravni i ažurirajte kako je potrebno.', 'You have committed to all items in this Request. Please check that all details are correct and update as-required.': 'Zadužili ste sve ljude u ovom zahtjevu. Molim provjerite da su svi detalji ispravni i ažurirajte kako je potrebno.', 'You have committed to this Request. Please check that all details are correct and update as-required.': 'Napravili ste zaduženje po ovom zahtjevu. Molim provjerite da su svi detalji ispravni i ažurirajte kako je potrebno.', 'You have found a dead body?': 'Pronašli ste mrtvo tijelo?', "You have personalised settings, so changes made here won't be visible to you. To change your personalised settings, click": 'Imate lične postavke, pa promjene ovdje načinjene vam neće biti vidljive. Ukoliko želite promjeniti ličnu konfiguraciju, kliknite', "You have unsaved changes. Click Cancel now, then 'Save' to save them. Click OK now to discard them.": "Postoje promjene koje nisu spašene. Pritisnite 'Odustani', zatim pritisnite 'Snimi' da biste ih sačuvali. Pritisnite OK da biste ih odbacili.", 'You have unsaved changes. You need to press the Save button to save them': 'Imate nesnimljenih promjena. 
Možete kliknuti dugme za snimanje da ih snimite', "You haven't made any calculations": 'Niste napravili nikakve proračune', 'You must agree to the Terms of Service': 'Morate se složiti s uslovima upotrebe', 'You must be logged in to register volunteers.': 'Morate biti prijavljeni da registrujete volontere.', 'You must be logged in to report persons missing or found.': 'Morate biti ulogovani da biste prijavili nestanak ili pronalazak osobe.', 'You must enter a minimum of %d characters': 'Morate unijeti najmanje %d znakova', 'You must enter a minimum of 4 characters': 'Morate unijeti najmanje 4 znaka', 'You must provide a series id to proceed.': 'Morate obezbijediti ID serije da nastavite.', 'You need to check all item quantities and allocate to bins before you can receive the shipment': 'Morate provjeriti sve količine stavki i dodijeliti ih u korpe prije prijema pošiljke', 'You need to check all item quantities before you can complete the return process': 'Morate provjeriti sve količine stavki prije završetka procesa vraćanja', 'You need to create a template before you can create a series': 'Trebate kreirati predložak prije nego možete kreirati seriju', 'You need to have at least 2 records in this list in order to merge them.': 'Trebate imati bar 2 zapisa u ovoj listi da ih možet spojiti.', 'You need to use the spreadsheet which you can download from this page': 'Trebate koristiti tablicu koju možete preuzeti s ove stranice', 'You should edit Twitter settings in models/000_config.py': 'Trebali biste izmijeniti postavke na Twitteru u models/000_config.py', 'Your action is required. Please approve user': 'Vaša akcija je potrebna, molim potvrdite korisnika', 'Your action is required. Please approve user %s asap:': 'Potrebna je vaša akcija. Potvrdite korisnika %s što prije moguće:', 'Your current ordered list ... (#TODO [String])': 'Vaša trenutna lista narudžbi ... (#TODO [String])', 'Your current ordered list of solution items is shown below. You can change it by voting again.': 'Vaša trenutno naručena lista riješenih predmeta je prikazana ispod. Možete je promijeniti tako što ćete opet glasati.', 'Your name for this search. Notifications will use this name.': 'Vaše ime za ovu pretragu. Napomene će koristiti ovo ime.', 'Your post was added successfully.': 'Vaša poruka je uspješno dodata.', 'Your system has been assigned a unique identification (UUID), which other computers around you can use to identify you. To view your UUID, you may go to Synchronization -> Sync Settings. You can also see other settings on that page.': 'Vašem sistemu je dodijeljen jedinstveni identifikacijski broj (UUID), kojeg ostali računari oko vas mogu koristiti da vas identifikuju. Da biste vidjeli svoj UUID, možete otići na Sinhronizacija-> Postavke sinhronizacije. 
Na toj stranici možete vidjeti i ostala podešavanja.', 'Zambia': 'Zambija', 'Zero Hour': 'Početni trenutak', 'Zeroconf Description': 'Opis bez potrebe za konfiguracijom', 'Zimbabwe': 'Zimbabve', 'Zinc roof': 'Krov od cinka', 'ZIP Code': 'Poštanski broj', 'ZIP/Postcode': 'Poštanski broj', 'Zone': 'Zona', 'Zoom': 'Uvećaj', 'Zoom In': 'Uvećaj', 'Zoom in closer to Edit OpenStreetMap layer': 'Približi za uređivanjeEdit OpenStreetMap sloja', 'Zoom In: click in the map or use the left mouse button and drag to create a rectangle': 'Uvećanje: kliknite unutar mape ili koristite lijevu tipku miša da napravite pravougaonik', 'Zoom Levels': 'Nivoi zumiranja', 'Zoom Out: click in the map or use the left mouse button and drag to create a rectangle': 'Umanjenje: kliknite unutar mape ili koristite lijevu tipku miša da napravite pravougaonik', 'Zoom to Current Location': 'Uvećaj na trenutnu lokaciju', 'Zoom to maximum map extent': 'Uvećaj na maksimalnu veličinui mape', }
bobrock/eden
languages/bs.py
Python
mit
537,974
[ "VisIt" ]
581b7c0bc679c9e927ca394b345d61450b48b1c2b4cd7572adf4efdbf36ec829
# -*- coding: utf-8 -*- # # This file is part of CancellationTools # # CancellationTools is open-source software for running cancellation tasks, # and directly analysing the data they produce. # # Copyright (C) 2014, Edwin S. Dalmaijer # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/> __author__ = u"Edwin Dalmaijer" # CancellationTools from libhelper import gaussian, intersection, pearsonr from libinput import check_mouseclicks # native import copy import math import os # external from matplotlib import font_manager, image, pyplot import numpy import pygame # try importing Android, to support the Android app try: import android # if the import fails, we're probably not running the Android app, so no panic except: pass # DEBUG # #from androidfriendly.matplotlib import font_manager, image, pyplot #import androidfriendly.numpy as numpy # # # # # # # # # # # FUNCTIONS def batch_analysis(settings): """Runs an analysis for every single dataset that is in the data folder, saving the output while running; afterwards all the output text files are read and their content is combined into a single text file arguments settings - app settings dict, which includes a dict on the task properties returns settings - same settings dict as was passed (updated) """ # get display disp = pygame.display.get_surface() disp.fill(settings[u'bgc']) # check if the current analysis is for an online task if os.path.dirname(settings[u'analysisproperties'][u'datapath']) == settings[u'dir'][u'onlinedata']: datadir = settings[u'dir'][u'onlinedata'] else: datadir = settings[u'dir'][u'rawout'] # loop through all data folders alldata = os.listdir(datadir) for i in range(len(alldata)): # set the new datafile settings[u'analysisproperties'][u'datapath'] = os.path.join(datadir, alldata[i]) # show waiting message disp.fill(settings[u'bgc']) textsurf = settings[u'font'][u'large'][u'regular'].render(u"running analysis %d/%d, please wait..." % (i+1, len(alldata)), False, settings[u'fgc']) disp.blit(textsurf, (int(settings[u'dispcentre'][0]-textsurf.get_width()/2), int(settings[u'dispcentre'][1]-textsurf.get_height()/2))) pygame.display.flip() # prepare new Analysis analysis = Analysis(settings) # run analysis analysis.run() # show waiting message disp.fill(settings[u'bgc']) textsurf = settings[u'font'][u'large'][u'regular'].render(u"combining %d data files, please wait..." 
% (len(alldata)), False, settings[u'fgc']) disp.blit(textsurf, (int(settings[u'dispcentre'][0]-textsurf.get_width()/2), int(settings[u'dispcentre'][1]-textsurf.get_height()/2))) pygame.display.flip() # create a new output folder batchoutdir = os.path.join(settings[u'dir'][u'out'], u'batch') if not os.path.isdir(batchoutdir): os.mkdir(batchoutdir) # create a new text file in the new output folder batchtxt = open(os.path.join(batchoutdir, u'summary.txt'), u'w') # loop through all output textfiles i = 0 for dataset in alldata: # only use if the dataset is in fact a data set if os.path.isdir(os.path.join(settings[u'dir'][u'out'], dataset)): # read the textfile txtfile = open(os.path.join(settings[u'dir'][u'out'], dataset, u'summary.txt'), u'r') lines = txtfile.readlines() # write the line to the output (and the header if this is the # first file that is being read) if i == 0: batchtxt.write(lines[0]) batchtxt.write(lines[1] + u'\n') # increase iteration number i += 1 # close batch text file batchtxt.close() # average all the heatmaps # create an empty dict, with empty lists to contain data per maptype heatmapdata = {} maptypes = [u'cancellation', u'intersection', u'omission'] for maptype in maptypes: heatmapdata[maptype] = [] # go through all datasets and tasktypes, to collect all heatmap data for dataset in alldata: # only use if the dataset is in fact a data set if os.path.isdir(os.path.join(settings[u'dir'][u'out'], dataset)): # collect datasets for maptype in maptypes: # load data hm = numpy.load(os.path.join(settings[u'dir'][u'out'], dataset, u'raw_heatmap_data_%s.npy' % maptype)) # proportionalize data hm = hm / numpy.nanmax(hm) # store data heatmapdata[maptype].append(copy.deepcopy(hm)) # go through map types again, to create average heatmap plots for maptype in maptypes: # average datasets heatmapdata[maptype] = numpy.nansum(numpy.array(heatmapdata[maptype]), axis=0) / len(alldata) # dots per inch (float!) 
dpi = 100.0 # image size in pixels dispsize = (int(numpy.size(heatmapdata[maptype],axis=1)),int(numpy.size(heatmapdata[maptype],axis=0))) # image size in inches figsize = (dispsize[0]/dpi, dispsize[1]/dpi) # create a new figure fig = pyplot.figure(figsize=figsize, dpi=dpi, frameon=False) ax = pyplot.Axes(fig, [0,0,1,1]) ax.set_axis_off() fig.add_axes(ax) # draw heatmap ax.imshow(heatmapdata[maptype], cmap=u'jet', alpha=1, vmin=0, vmax=1) # set the axis to the display size ax.axis([0,dispsize[0],0,dispsize[1]]) # remove the axis grid ax.axis(u'off') # invert the y axis, as (0,0) is top left on a display ax.invert_yaxis() # save figure fig.savefig(os.path.join(batchoutdir, u'%s_average_heatmap.png' % maptype)) # show ending screen disp.fill(settings[u'bgc']) textsurf = settings[u'font'][u'large'][u'regular'].render(u"the analysis was succesfully completed", False, settings[u'fgc']) disp.blit(textsurf, (int(settings[u'dispcentre'][0]-textsurf.get_width()/2), int(settings[u'dispsize'][1]/3-textsurf.get_height()/2))) textsurf = settings[u'font'][u'large'][u'regular'].render(u"(click to return to the main menu)", False, settings[u'fgc']) disp.blit(textsurf, (int(settings[u'dispcentre'][0]-textsurf.get_width()/2), int(2*settings[u'dispsize'][1]/3-textsurf.get_height()/2))) pygame.display.flip() # wait for a click (allowing some time to unclick) pygame.time.wait(200) while check_mouseclicks()[0] == None: # allow an Android interrupt if settings[u'android']: if android.check_pause(): android.wait_for_resume() # switch back to start screen settings[u'currentscreen'] = u'start' disp.blit(settings[u'guiscreens'][settings[u'currentscreen']], (0,0)) pygame.display.flip() # allow a bit of time to unclick pygame.time.wait(200) return settings def start_analysis(settings): """Prepares the analysis, and runs it (saving data happens while running); afterwards the settings are returned arguments settings - app settings dict, which includes a dict on the task properties returns settings - same settings dict as was passed (updated) """ # get display disp = pygame.display.get_surface() disp.fill(settings[u'bgc']) # show loading message disp.fill(settings[u'bgc']) textsurf = settings[u'font'][u'large'][u'regular'].render(u"running analysis, please wait...", False, settings[u'fgc']) disp.blit(textsurf, (int(settings[u'dispcentre'][0]-textsurf.get_width()/2), int(settings[u'dispcentre'][1]-textsurf.get_height()/2))) pygame.display.flip() # prepare new Analysis analysis = Analysis(settings) # run analysis analysis.run() # show ending screen disp.fill(settings[u'bgc']) textsurf = settings[u'font'][u'large'][u'regular'].render(u"the analysis was succesfully completed", False, settings[u'fgc']) disp.blit(textsurf, (int(settings[u'dispcentre'][0]-textsurf.get_width()/2), int(settings[u'dispsize'][1]/3-textsurf.get_height()/2))) textsurf = settings[u'font'][u'large'][u'regular'].render(u"(click to return to the main menu)", False, settings[u'fgc']) disp.blit(textsurf, (int(settings[u'dispcentre'][0]-textsurf.get_width()/2), int(2*settings[u'dispsize'][1]/3-textsurf.get_height()/2))) pygame.display.flip() # wait for a click (allowing some time to unclick) pygame.time.wait(200) while check_mouseclicks()[0] == None: # allow an Android interrupt if settings[u'android']: if android.check_pause(): android.wait_for_resume() # switch back to start screen settings[u'currentscreen'] = u'start' disp.blit(settings[u'guiscreens'][settings[u'currentscreen']], (0,0)) pygame.display.flip() # allow a bit of time to unclick 
pygame.time.wait(200) return settings # # # # # # CLASSES class Analysis(): """The Analysis class if for running an analysis on stored data""" def __init__(self, settings): """Initializes an Analysis instance, which will be based on the passed analysis properties arguments settings - the app settings dict, which contains an analysisproperties dict, with the following keys: "datapath" - full path to a raw data directory "disthreshold" - distance threshold for transforming click coordinates to target coordinates (every click ourside of this threshold of any target will be disregarded) """ # version self.version = settings[u'version'] # read analysis properties self.properties = settings[u'analysisproperties'] # check if the dataset if from an onlinetask if os.path.dirname(settings[u'analysisproperties'][u'datapath']) == settings[u'dir'][u'onlinedata']: self.onlineset = True else: self.onlineset = False # check if the datapath exists # (directory for data from local tasks, textfile for online tasks) if os.path.isdir(self.properties[u'datapath']) or os.path.isfile(self.properties[u'datapath']): # store the data directory self.datadir = self.properties[u'datapath'] # get the output file name (stored under the name 'ppname', # which will later be updated to the name without a timestamp) self.ppname = os.path.basename(self.datadir) # get rid of the .txt extension for online datasets if self.onlineset: self.ppname = os.path.splitext(self.ppname)[0] # create a new output directory self.outdir = os.path.join(settings[u'dir'][u'out'],self.ppname) if not os.path.isdir(self.outdir): os.mkdir(self.outdir) # path to the main task directory (will be used later on to grab the # task image from) self.taskdir = settings[u'dir'][u'tasks'] # PLOT STUFF # dots per inch (float!) 
self.dpi = 100.0 # PDF size in inches self.pdfsize = (8.27,11.69) # colour to make things look pretty self.colours = copy.deepcopy(settings[u'colours']) # transform the colours to matplotlib colours (between 0 and 1) for k in self.colours.keys(): for i in range(len(self.colours[k])): self.colours[k][i] = (self.colours[k][i][0]/255.0, self.colours[k][i][1]/255.0, self.colours[k][i][2]/255.0) # set the font self.fontprop = font_manager.FontProperties(fname=settings[u'dir'][u'plotfont']) self.boldfontprop = font_manager.FontProperties(fname=settings[u'dir'][u'boldplotfont']) # prepare the analysis self.prepare() def prepare(self): """Prepares the analysis, by reading the raw data file and creating some variables accordingly; among these are a dict with paths to all relevant files, the participant name, the task name, the task date, the task time, the input type, the cancellation visibility, and the raw x and y coordinates, along with timestamps """ # FILE DICT # create a files dict, to contain paths to all relevant files self.files = {} # add the raw data file and the marked task image to the files dict if self.onlineset: self.files[u'raw'] = copy.copy(self.datadir) else: self.files[u'raw'] = os.path.join(self.datadir, u'raw.txt') # READ DATAFILE # open the data file df = open(self.files[u'raw'], 'r') # read all lines raw = df.readlines() # clean up and split the lines for i in range(len(raw)): raw[i] = raw[i].replace(u'\n',u'').replace(u'\r',u'').replace(u'"',u'').split(u'\t') # extract the header header = raw.pop(0) # STOP FURTHER PROCESSING IF THE FILE IS EMPTY if len(raw) < 1: self.fileisempty = True return else: self.fileisempty = False # SETTINGS # set some variables self.ppname = raw[0][header.index(u'ppname')] self.taskname = raw[0][header.index(u'taskname')] self.testdate = raw[0][header.index(u'testdate')] self.testtime = raw[0][header.index(u'testtime')] self.inputtype = raw[0][header.index(u'input')] self.visibility = raw[0][header.index(u'cancellations')] # add the task image and task coordinates path to the files dict self.files[u'task'] = os.path.join(self.taskdir, self.taskname, u'task.png') self.files[u'taskcors'] = os.path.join(self.taskdir, self.taskname, u'targets.txt') # IMAGE SETTINGS # load images self.taskimg = image.imread(self.files[u'task']) # image size in pixels self.dispsize = (int(numpy.size(self.taskimg,axis=1)),int(numpy.size(self.taskimg,axis=0))) # image size in inches self.figsize = (self.dispsize[0]/self.dpi, self.dispsize[1]/self.dpi) # DATA EXTRACTION # empty lists to contain data points self.time = [] self.x = [] self.y = [] self.cors = [] # extract data for i in range(len(raw)): self.time.append(raw[i][header.index(u'time')]) self.x.append(raw[i][header.index(u'x')]) self.y.append(raw[i][header.index(u'y')]) self.cors.append((int(self.x[i]),int(self.y[i]))) # lists to numpy arrays of integers self.time = numpy.array(self.time, dtype=int) self.x = numpy.array(self.x, dtype=int) self.y = numpy.array(self.y, dtype=int) # task duration self.duration = {u'total':self.time[-1]} # ms h = numpy.floor(self.duration[u'total'] / 3600000.0) m = numpy.floor((self.duration[u'total']-h*3600000.0) / 60000.0) s = numpy.ceil(((self.duration[u'total']-h*3600000.0)-m*60000.0) / 1000.0) self.duration[u'string'] = u"%2.f:%2.f:%2.f" % (h,m,s) self.duration[u'string'] = self.duration[u'string'].replace(u' ',u'0') return True def run(self): """Runs through all analysis, and creates output files in the output directory """ # # # # # # NO DATA if self.fileisempty: # get the data 
file name dataname = os.path.basename(self.datadir) # create a new text file in the output outfile = open(os.path.join(self.outdir, u"empty.txt"), u"w") # write a message to this file outfile.write(u"File '%s' contains no data." % dataname) # close the text file outfile.close() # # # # # # DATA ANALYSIS else: # TARGETS # read the target coordinates for this task self.read_target_cors() # transform clicks to targets self.clicks_to_targets() # STOP WITHOUT CANCELLATIONS if len(self.ctx) < 1: # get the data file name dataname = os.path.basename(self.datadir) # create a new text file in the output outfile = open(os.path.join(self.outdir, u"empty.txt"), u"w") # write a message to this file outfile.write(u"File '%s' contains no cancellations." % dataname) # close the text file outfile.close() # stop further processing return # NEGLECT MEASURES # calculate the amount of omissions self.calc_omissions() # calculate centre of cancellation (x and y) self.calc_centre_of_cancellation() # DISORGANIZED SEARCH MEASURES # calculate the amount of revisits self.calc_total_revisits() self.calc_immediate_revisits() self.calc_delayed_revisits() # calculate the mean inter-cancellation (standardized) distance self.calc_mean_interdist() self.calc_stand_interdist() # calculate the mean inter-cancellation time and the search speed self.calc_mean_intertime() self.calc_search_speed() # calculate the Q score self.calc_qscore() # calculate the mean angle between cancellations self.calc_mean_angle() # calculate the standardized angle between cancellations self.calc_stand_angle() # calculate the best R self.calc_best_r() # calculate intersection rate self.calc_intersect_rate() # calculate the first cancellation self.calc_first_cancellation() # # # # # # OUTPUT FILES # text document with all values self.summary_txt() # cancellation path self.plot_cancellation_path() # heatmaps self.plot_heatmap(maptype=u'cancellation') self.plot_heatmap(maptype=u'omission') self.plot_heatmap(maptype=u'intersection') # cancellation heatmap superimposed over the task self.plot_superimposed_heatmap(maptype=u'cancellation') self.plot_superimposed_heatmap(maptype=u'omission') self.plot_superimposed_heatmap(maptype=u'intersection') # best R plots self.plot_best_r() # summary of everything in a PDF self.summary_pdf() # close all figures (in case Matplotlib was in interactive mode) pyplot.close(u'all') # # # # # # CALCULATORS # TARGETS AND TRANSFORMATIONS def read_target_cors(self): """Reads all the target coordinates, and adds these to self.tarcors, which is a dict containing two keys: u'x' and u'y', both containing NumPy arrays of the x and y coordinates""" # READ DATAFILE # open the data file df = open(self.files[u'taskcors'], 'r') # read all lines raw = df.readlines() # clean up and split the lines for i in range(len(raw)): raw[i] = raw[i].replace(u'\n',u'').replace(u'\r',u'').replace(u'"',u'').split(u'\t') # extract the header header = raw.pop(0) # DATA EXTRACTION # empty lists to contain data points self.tarcors = {u'x':[], u'y':[], u'cors':[]} # extract data for i in range(len(raw)): self.tarcors[u'x'].append(raw[i][header.index(u'x')]) self.tarcors[u'y'].append(raw[i][header.index(u'y')]) self.tarcors[u'cors'].append((int(self.tarcors[u'x'][i]),int(self.tarcors[u'y'][i]))) # lists to numpy arrays of integers self.tarcors[u'x'] = numpy.array(self.tarcors[u'x'], dtype=int) self.tarcors[u'y'] = numpy.array(self.tarcors[u'y'], dtype=int) def clicks_to_targets(self): """Transforms original click coordinates to target coordinates for all clicks 
within the distance treshold from a target""" # read target coordinates if this has not been done yet if not hasattr(self, u'tarcors'): self.read_target_cors() # empty lists for click-target transformed (ct) coordinates self.ctt = [] self.ctx = [] self.cty = [] self.ctcors = [] # match click positions to star positions for i in range(0,len(self.x)): # check Cartesian distance dist = (self.tarcors[u'x']-self.x[i])**2 + (self.tarcors[u'y']-self.y[i])**2 # determine lowest value (=clicked target) clickcor = numpy.argmin(dist) if dist[clickcor]**0.5 < self.properties[u'disthreshold']: # add transformed click to lists self.ctt.append(self.time[i]) self.ctx.append(self.tarcors[u'x'][clickcor]) self.cty.append(self.tarcors[u'y'][clickcor]) self.ctcors.append((self.ctx[-1],self.cty[-1])) # NEGLECT MEASURES def calc_omissions(self): """Calculates the amount of omissions""" # read target coordinates if this has not been done yet if not hasattr(self, u'tarcors'): self.read_target_cors() # calculate click transformed coordinates if this has not been done if not hasattr(self, u'ctcors'): self.clicks_to_targets() # loop through all target coordinates self.omissions = { u'cors':[], u'x':numpy.zeros(len(self.tarcors[u'cors'])), u'y':numpy.zeros(len(self.tarcors[u'cors'])), u'total':0} for i in range(len(self.tarcors[u'cors'])): # if the target coordinate does not appear in the list of # clicked targets, it was not cancelled c = self.tarcors[u'cors'][i] if self.ctcors.count(c) < 1: self.omissions[u'x'][i] = c[0] self.omissions[u'y'][i] = c[1] self.omissions[u'cors'].append(c) self.omissions[u'total'] += 1 # correct the length of the arrays self.omissions[u'x'] = self.omissions[u'x'][self.omissions[u'x']>0] self.omissions[u'y'] = self.omissions[u'y'][self.omissions[u'y']>0] # number of omissions per half self.omissions[u'left'] = len(self.omissions[u'x'][self.omissions[u'x']<self.dispsize[0]/2]) self.omissions[u'right'] = len(self.omissions[u'x'][self.omissions[u'x']>self.dispsize[0]/2]) def calc_centre_of_cancellation(self): """Calculates the horizontal and vertical centres of cancellation""" # calculate click transformed coordinates if this has not been done if not hasattr(self, u'ctx'): self.clicks_to_targets() # the centre of cancellation is the mean of the cancelled targets' # x or y positions, normalized between -1* and 1** (right/bottom); # 0 means no bias towards either side # * -1 corresponds with the left or upmost target position # ** 1 corresponds with the right or bottommost target position xbounds = [numpy.min(self.tarcors[u'x']), numpy.max(self.tarcors[u'x'])] ybounds = [numpy.min(self.tarcors[u'y']), numpy.max(self.tarcors[u'y'])] fieldsize = [xbounds[1]-xbounds[0], ybounds[1]-ybounds[0]] self.coc = {} self.coc[u'x'] = (numpy.mean(numpy.unique(self.ctx)) - xbounds[0] - fieldsize[0]/2.0) / float(fieldsize[0]/2.0) self.coc[u'y'] = (numpy.mean(numpy.unique(self.cty)) - ybounds[0] - fieldsize[1]/2.0) / float(fieldsize[1]/2.0) # DISORGANIZED SEARCH MEASURES def calc_total_revisits(self): """Calculates the total amount of revisits""" # read target coordinates if this has not been done yet if not hasattr(self, u'tarcors'): self.read_target_cors() # calculate click transformed coordinates if this has not been done if not hasattr(self, u'ctcors'): self.clicks_to_targets() # loop through all target coordinates self.pers = {} self.pers[u'tot'] = 0 for c in self.tarcors[u'cors']: # the total number of revisits is the number of times a # target was clicked, minus one (for the first time the targer # was 
clicked) if self.ctcors.count(c) > 1: self.pers[u'tot'] += self.ctcors.count(c) - 1 def calc_immediate_revisits(self): """Calculates the amount of immediate revisits""" # calculate click transformed coordinates if this has not been done if not hasattr(self, u'ctx'): self.clicks_to_targets() # repetitions in the coordinates will result in a diff of 0; # numpy.where will give the index numbers of these coordinates; # these are the persevarations per individual axis px = numpy.where(numpy.diff(self.ctx)==0)[0] py = numpy.where(numpy.diff(self.cty)==0)[0] # numpy.intersect1d gives the sorted, unique values that are in both # index number arrays: the immediate revisits on both axis imp = numpy.intersect1d(px, py) # save value self.pers[u'imm'] = len(imp) def calc_delayed_revisits(self): """Calculates the amount of delayed revisits""" # calculate the total amount of revisits and the amount of # immediate revisits, if this has not been done yet if hasattr(self, u'pers'): if not u'tot' in self.pers.keys(): self.calc_total_revisits() if not u'imm' in self.pers.keys(): self.calc_immediate_revisits() else: self.calc_total_revisits() self.calc_immediate_revisits() # the number of delayed revisits, is the total number of # revisits minus the number of immediate revisits self.pers[u'del'] = self.pers[u'tot'] - self.pers[u'imm'] def calc_mean_interdist(self): """Calculates the mean distance between cancellations""" # calculate click transformed coordinates if this has not been done if not hasattr(self, u'ctx'): self.clicks_to_targets() # empty array to contain interdistances self.intdist = {u'all':numpy.zeros(len(self.ctx)-1)} # calculate interdistances for i in range(len(self.intdist[u'all'])): self.intdist[u'all'][i] = ((self.ctx[i]-self.ctx[i+1])**2 + (self.cty[i]-self.cty[i+1])**2)**0.5 # calculate mean interdistance (but only for distances greater than # 0, as an intdist of 0 reflects a revisit) self.intdist[u'mean'] = numpy.mean(self.intdist[u'all'][self.intdist[u'all']>0]) def calc_stand_interdist(self): """Calculates the standardized interdistance""" # read target coordinates if this has not been done yet if not hasattr(self, u'tarcors'): self.read_target_cors() # calculate the mean interdistance, if this has not been done yet if hasattr(self,u'intdist'): if not u'mean' in self.intdist.keys(): self.calc_mean_interdist() else: self.calc_mean_interdist() # calculate mean distance between closest targets self.intdist[u'alltar'] = numpy.zeros(len(self.tarcors[u'x'])) # loop through all targets for i in range(len(self.tarcors[u'x'])): # calculate the distances between the current target and all # the other targets (one of these will result in a distance of # 0: the coordinate of the current target, so we do not look at # distances of 0) intdist = ((self.tarcors[u'x'] - self.tarcors[u'x'][i])**2 + (self.tarcors[u'y'] - self.tarcors[u'y'][i])**2)**0.5 # get the distance to the closest neighbour self.intdist[u'alltar'][i] = numpy.min(intdist[intdist>0]) # calculate the mean lowest target interdistance self.intdist[u'meantar'] = numpy.mean(self.intdist[u'alltar']) # calculate the standardized interdistance self.intdist[u'standardized'] = self.intdist[u'mean'] / self.intdist[u'meantar'] def calc_mean_intertime(self): """Calculates the mean time between cancellations""" # empty array to contain inter-cancellation times self.inttime = {u'all':numpy.zeros(len(self.ctt)-1)} # calculate inter-cancellation times for i in range(len(self.inttime[u'all'])): self.inttime[u'all'][i] = self.ctt[i+1] - self.ctt[i] # 
calculate mean inter-cancellation time self.inttime[u'mean'] = numpy.mean(self.inttime[u'all']) def calc_search_speed(self): """Calculates the search speed: mean(distance / time)""" # calculate the average interdistance if this has not been done yet if not hasattr(self, u'intdist'): self.calc_mean_interdistance() # calculate the average intertime if this has not been done yet if not hasattr(self, u'inttime'): self.calc_mean_intertime() # calculate the mean search speed self.searchspd = numpy.mean(self.intdist[u'all'] / self.inttime[u'all']) def calc_qscore(self): """Calculates the Q score (Hills & Geldmacher, 1998)""" # read target coordinates if this has not been done yet if not hasattr(self, u'tarcors'): self.read_target_cors() # calculate the amount of omissions if this has not been done yet if not hasattr(self, u'omissions'): self.calc_omissions() # calculate the Q score # (correct responses/total target) * (correct responses / total time) corresps = float(len(self.tarcors[u'cors']) - self.omissions[u'total']) self.qscore = (corresps / len(self.tarcors[u'cors'])) * (corresps / (self.duration[u'total']/1000)) def calc_mean_angle(self): """Calculates the mean angle between cancellations, where 0 means all cancellations are on a horizontal line, and 90 means all cancellations are on a vertical line""" # calculate click transformed coordinates if this has not been done if not hasattr(self, u'ctx'): self.clicks_to_targets() # calculate the interdistances if this has not been done yet if hasattr(self, u'intdist'): if not u'all' in self.intdist.keys(): self.calc_mean_interdist() else: self.calc_mean_interdist() # empty array for all intercancellation angles self.angle = {u'all':numpy.zeros(len(self.ctx)-1)} # calculate the intercancellation angles for i in range(len(self.angle['all'])): # calculate the vertical distance ydist = float(abs(self.cty[i]-self.cty[i+1])) # check if there is an interdistance (otherwise it's a # revisit, and those do not have an interangle) if self.intdist[u'all'][i] > 0: self.angle['all'][i] = math.degrees(math.asin(ydist/self.intdist['all'][i])) # invalid angles (revisits) will be marked -1, and are # not used in further calculations else: self.angle['all'][i] = -1 # calculate the mean intercancellation angle self.angle[u'mean'] = numpy.mean(self.angle['all'][self.angle['all']>=0]) def calc_stand_angle(self): """Calculates the standardized angle between cancellations, where a value of 1 means all cancellations where on a horizontal or vertical line (very organised), and 0 means all cancellations were diagonal (disorganised)""" # calculate all intercancellation angles, if this has not been done if hasattr(self, u'angle'): if not u'all' in self.angle.keys(): self.calc_mean_angle() else: self.calc_mean_angle() # calculate the standardized angles (invalid angles will have a # value below 0, we do not take those into account self.angle['allstd'] = numpy.abs(((self.angle['all'][self.angle['all']>=0]/90.0)*2) -1) # calculate the mean standardized angle self.angle[u'standardized'] = numpy.mean(self.angle['allstd']) def calc_best_r(self): """Calculates the 'best r' value, based on Mark et al. (2004). 
The best r is the highest of the absolute values of the Pearson correlations between both the x and the y values and the cancellation number (the cancellation rank order)""" # calculate click transformed coordinates if this has not been done if not hasattr(self, u'ctx'): self.clicks_to_targets() # empty dict to contain values self.bestr = {} # correlations rank = numpy.arange(1,len(self.ctx)+1,1) self.bestr[u'x'] = pearsonr(rank, self.ctx) self.bestr[u'y'] = pearsonr(rank, self.cty) self.bestr[u'best'] = max([abs(self.bestr[u'x']),abs(self.bestr[u'y'])]) def calc_intersect_rate(self): """Calculates the amount of cancellation path intersections""" # calculate click transformed coordinates if this has not been done if not hasattr(self, u'ctx'): self.clicks_to_targets() # empty dict to hold all intersection coordinates self.intersections = {u'x':[], u'y':[], u'cors':[]} # loop through all lines for i in range(len(self.ctcors)-1): # loop through all lines after this one (not before, as we do # not want to count any intersections double!) for j in range(i+1, len(self.ctcors)-1): # line starting and ending coordinates line1 = (self.ctcors[i],self.ctcors[i+1]) line2 = (self.ctcors[j],self.ctcors[j+1]) # find any intersections intersect = intersection(line1,line2) # if there is an intersection, add it to the list if intersect: self.intersections[u'x'].append(intersect[0]) self.intersections[u'y'].append(intersect[1]) self.intersections[u'cors'].append(intersect) # lists to arrays self.intersections[u'x'] = numpy.array(self.intersections[u'x']) self.intersections[u'y'] = numpy.array(self.intersections[u'y']) # count the amount and rate of intersections self.intersections[u'total'] = len(self.intersections[u'cors']) self.intersections[u'rate'] = self.intersections['total'] / float(len(self.ctcors) - self.pers[u'imm']) def calc_first_cancellation(self): """Calculates the location of the first cancellation in normalized space top-left = (0,0), bottom-right = (1,1), and determines the quadrant of this first cancellation""" # calculate click transformed coordinates if this has not been done if not hasattr(self, u'ctx'): self.clicks_to_targets() # emtpy dict to contain data self.firstcancel = {} # normalized coordinate self.firstcancel[u'norm'] = (self.ctx[0] / float(self.dispsize[0]), self.cty[0] / float(self.dispsize[1])) # quadrant self.firstcancel[u'quad'] = u'' if self.firstcancel[u'norm'][1] < 0.5: self.firstcancel[u'quad'] += u'top-' else: self.firstcancel[u'quad'] += u'bottom-' if self.firstcancel[u'norm'][0] < 0.5: self.firstcancel[u'quad'] += u'left' else: self.firstcancel[u'quad'] += u'right' # # # # # # PLOTTERS def plot_cancellation_path(self): """Plots all cancellations, showing a rank number for each cancellation, and a line going from point to point""" # read target coordinates if this has not been done yet if not hasattr(self, u'tarcors'): self.read_target_cors() # calculate click transformed coordinates if this has not been done if not hasattr(self, u'ctx'): self.clicks_to_targets() # create new figure fig, ax = pyplot.subplots(nrows=1,ncols=1) fig.set_dpi(self.dpi) fig.set_size_inches((self.figsize[0]*0.65,self.figsize[1]*0.65), forward=True) #fig.set_size_inches(self.figsize, forward=True) ax.set_axis_off() # draw all targets (black dots) ax.plot(self.tarcors[u'x'], self.tarcors[u'y'], 'o', color=self.colours[u'aluminium'][5], markersize=5, label=u"targets") # draw all clicks (red crosses) ax.plot(self.x, self.y, 'x', color=self.colours[u'scarletred'][2], markersize=15, 
markeredgewidth=3, label=u"clicks") # draw all cancellations (green crosses) ax.plot(self.ctx, self.cty, 'x', color=self.colours[u'chameleon'][2], markersize=15, markeredgewidth=3, label=u"cancellations") # draw the cancellation path (blue line) ax.plot(self.ctx, self.cty, '-', color=self.colours[u'skyblue'][0], linewidth=3, label=u"cancelpath") # annotate the cancellation rank numbers for i in range(1,len(self.ctcors)+1): ax.annotate(unicode(i), (self.ctcors[i-1]), fontsize=24, fontproperties=self.fontprop) # add a legend ax.legend(loc=u'lower right')#, fontproperties=self.fontprop) # fix axis ax.axis([0,self.dispsize[0],0,self.dispsize[1]]) ax.invert_yaxis() # title #ax.set_title(u"participant '%s', '%s' task (%s %s)" % (self.ppname,self.taskname,self.testdate,self.testtime), fontproperties=self.fontprop) # save the figure self.files[u'cancelpath'] = os.path.join(self.outdir, u'cancellation_path.png') fig.savefig(self.files[u'cancelpath']) def _heatmap_maximum(self): """For internal use! Calculates the theoretical maximum value of a cancellation or omission heatmap""" # HEATMAP # Gaussian gwh = int(self.dispsize[0]/2) gsdwh = gwh/6 gaus = gaussian(gwh,gsdwh) # matrix of zeroes strt = gwh/2 heatmapsize = self.dispsize[1] + 2*strt, self.dispsize[0] + 2*strt heatmap = numpy.zeros(heatmapsize, dtype=float) # run through all targets for x, y in self.tarcors[u'cors']: # correct Gaussian size if either coordinate falls outside of # display boundaries if (not 0 < x < self.dispsize[0]) or (not 0 < y < self.dispsize[1]): hadj=[0,gwh];vadj=[0,gwh] if 0 > x: hadj[0] = abs(x) x = 0 elif self.dispsize[0] < x: hadj[1] = gwh - int(x-self.dispsize[0]) if 0 > y: vadj[0] = abs(y) y = 0 elif self.dispsize[1] < y: vadj[1] = gwh - int(y-self.dispsize[1]) # add adjusted Gaussian to the current heatmap heatmap[y:y+vadj[1],x:x+hadj[1]] += gaus[vadj[0]:vadj[1],hadj[0]:hadj[1]] else: # add Gaussian to the current heatmap heatmap[y:y+gwh,x:x+gwh] += gaus # calculate maximum self.heatmapvmax = numpy.max(heatmap) def plot_heatmap(self, maptype=u'cancellation'): """Plots a heatmap of the cancelled targets keyword arguments maptype -- string indicating the type of heatmap to be produced; the options are: 'cancellations' for a heatmap of the cancelled targets 'omissions' for a heatmap of the omissions 'intersections' for a heatmap of the intersections """ # read target coordinates if this has not been done yet if not hasattr(self, u'tarcors'): self.read_target_cors() # calculate click transformed coordinates if this has not been done if not hasattr(self, u'ctcors'): self.clicks_to_targets() # calculate the heatmap maximum, if this has not been done yet if not hasattr(self, u'heatmapvmax'): self._heatmap_maximum() # DETERMINE COORDINATES gauscors = [] # coordinates for the cancellations if maptype == u'cancellation': # run through all targets for i in range(0,len(self.tarcors[u'x'])): # check if the target was cancelled if (self.tarcors[u'x'][i],self.tarcors[u'y'][i]) in self.ctcors: gauscors.append(self.tarcors[u'cors'][i]) # coordinates for the omissions elif maptype == u'omission': gauscors = copy.deepcopy(self.omissions[u'cors']) # coordinates for the intersections elif maptype == u'intersection': gauscors = copy.deepcopy(self.intersections[u'cors']) # if the maptype was incorrectly specified, print message and return else: print(u"ValueError in libanalysis.plot_heatmap: maptype '%s' not recognized" % maptype) return # HEATMAP # Gaussian gwh = int(self.dispsize[0]/2) gsdwh = gwh/6 gaus = gaussian(gwh,gsdwh) # matrix 
of zeroes strt = gwh/2 heatmapsize = self.dispsize[1] + 2*strt, self.dispsize[0] + 2*strt heatmap = numpy.zeros(heatmapsize, dtype=float) # run through all targets for x, y in gauscors: # correct Gaussian size if either coordinate falls outside of # display boundaries if (not 0 < x < self.dispsize[0]) or (not 0 < y < self.dispsize[1]): hadj=[0,gwh];vadj=[0,gwh] if 0 > x: hadj[0] = abs(x) x = 0 elif self.dispsize[0] < x: hadj[1] = gwh - int(x-self.dispsize[0]) if 0 > y: vadj[0] = abs(y) y = 0 elif self.dispsize[1] < y: vadj[1] = gwh - int(y-self.dispsize[1]) # add adjusted Gaussian to the current heatmap heatmap[y:y+vadj[1],x:x+hadj[1]] += gaus[vadj[0]:vadj[1],hadj[0]:hadj[1]] else: # add Gaussian to the current heatmap heatmap[y:y+gwh,x:x+gwh] += gaus # resize heatmap heatmap = heatmap[strt:self.dispsize[1]+strt,strt:self.dispsize[0]+strt] # SCALE TO THEORETICAL MAXIMUM if maptype in [u'cancellation', u'omission']: if self.heatmapvmax > 0: heatmap = heatmap / self.heatmapvmax vmax = 1 else: vmax = None # SAVE ARRAY self.files[u'%srawheatmap' % maptype] = os.path.join(self.outdir, u'raw_heatmap_data_%s.npy' % maptype) numpy.save(self.files[u'%srawheatmap' % maptype], heatmap) # HEATMAP IMAGE # create a new figure fig = pyplot.figure(figsize=self.figsize, dpi=self.dpi, frameon=False) ax = pyplot.Axes(fig, [0,0,1,1]) ax.set_axis_off() fig.add_axes(ax) # draw heatmap ax.imshow(heatmap, cmap=u'jet', alpha=1, vmax=vmax) # set the axis to the display size ax.axis([0,self.dispsize[0],0,self.dispsize[1]]) # remove the axis grid ax.axis(u'off') # invert the y axis, as (0,0) is top left on a display ax.invert_yaxis() # save figure self.files[u'%sheatmap' % maptype] = os.path.join(self.outdir, u'%s_heatmap.png' % maptype) fig.savefig(self.files[u'%sheatmap' % maptype]) # TRANSPARANT HEATMAP IMAGE # if there are no Gaussian coordinates, make whole map transparant if len(gauscors) == 0: heatmap[heatmap==0] = numpy.NaN # remove low values from heatmap else: lowestval = numpy.min(heatmap) lowbound = 0.15 #numpy.mean(heatmap[heatmap>0]) heatmap[heatmap<=lowbound] = numpy.NaN # hidden pixel, to re-introduce lowest value (for colour scaling) heatmap[0][0] = lowestval # create a new figure fig = pyplot.figure(figsize=self.figsize, dpi=self.dpi, frameon=False) ax = pyplot.Axes(fig, [0,0,1,1]) ax.set_axis_off() fig.add_axes(ax) # draw heatmap ax.imshow(heatmap, cmap=u'jet', alpha=1, vmax=vmax) # set the axis to the display size ax.axis([0,self.dispsize[0],0,self.dispsize[1]]) # remove the axis grid ax.axis(u'off') # invert the y axis, as (0,0) is top left on a display ax.invert_yaxis() # save figure self.files[u'%salphaheatmap' % maptype] = os.path.join(self.outdir, u'%s_heatmap_transparant.png' % maptype) fig.savefig(self.files[u'%salphaheatmap' % maptype]) def plot_superimposed_heatmap(self, maptype=u'cancellation'): """Plots a heatmap superimposed on the task image""" # draw heatmap if this has not been done yet if not u'%salphaheatmap' % maptype in self.files.keys(): self.plot_heatmap(maptype=maptype) # create a new figure fig = pyplot.figure(figsize=(self.dispsize[0]/self.dpi, self.dispsize[1]/self.dpi), dpi=self.dpi, frameon=False) ax = pyplot.Axes(fig, [0,0,1,1]) ax.set_axis_off() fig.add_axes(ax) # load images taskimg = image.imread(self.files[u'task']) heatmap = image.imread(self.files[u'%salphaheatmap' % maptype]) # resize task image taskimg = numpy.resize(taskimg, (numpy.size(heatmap,axis=0),numpy.size(heatmap,axis=1))) # draw task ax.imshow(self.taskimg, origin=u'upper', alpha=1) # superimpose 
heatmap ax.imshow(heatmap, alpha=0.5) # save figure self.files[u'%staskheatmap' % maptype] = os.path.join(self.outdir, u'%s_heatmap_superimposed.png' % maptype) fig.savefig(self.files[u'%staskheatmap' % maptype]) def plot_best_r(self): """Plots the correlations between cancellation rank number and cancellation x and y coordinates""" # calculate click transformed coordinates if this has not been done if not hasattr(self, u'ctx'): self.clicks_to_targets() # calculate click transformed coordinates if this has not been done if not hasattr(self, u'bestr'): self.calc_best_r() # create new figure fig, (ax1,ax2) = pyplot.subplots(nrows=2,ncols=1, sharex=True) fig.set_dpi(self.dpi) fig.set_size_inches(self.figsize, forward=True) # plot rank = numpy.arange(1,len(self.ctx)+1,1) ax1.plot(rank, self.ctx, '-', color=self.colours[u'chameleon'][2], label=u"R=%1.2f" % self.bestr[u'x']) ax2.plot(rank, self.cty, '-', color=self.colours[u'plum'][2], label=u"R=%1.2f" % self.bestr[u'y']) # finish plot ax1.legend(loc=u'best')#, fontproperties=self.fontprop) ax2.legend(loc=u'best')#, fontproperties=self.fontprop) ax1.set_ylabel(u"horizontal position (pixels)", fontproperties=self.fontprop) ax2.set_ylabel(u"vertical position (pixels)", fontproperties=self.fontprop) ax2.set_xlabel(u"cancellation rank number", fontproperties=self.fontprop) fig.suptitle(u"best R: %1.2f (participant '%s', task '%s')" % (self.bestr[u'best'],self.ppname,self.taskname), fontproperties=self.fontprop) # save figure self.files[u'bestr'] = os.path.join(self.outdir, u'best_r_plots.png') fig.savefig(self.files[u'bestr']) # SUMMARIES def summary_txt(self): """Creates a simple text file, containing all the measures""" # CHECKS # check if the omissions have been calculated if not hasattr(self, u'omissions'): self.calc_omissions() # check if the centre of cancellation has been calculated if not hasattr(self, u'coc'): self.calc_centre_of_cancellation() # check if the omissions have been calculated if hasattr(self, u'pers'): if u'tot' not in self.pers.keys(): self.calc_total_revisits() if u'imm' not in self.pers.keys(): self.calc_immediate_revisits() if u'del' not in self.pers.keys(): self.calc_delayed_revisits() else: self.calc_total_revisits() self.calc_immediate_revisits() self.calc_delayed_revisits() # check if the standardized interdistance has been calculated if hasattr(self, u'intdist'): if not u'standardized' in self.intdist.keys(): self.calc_stand_interdist() else: self.calc_stand_interdist() # check if the intertime has been calculated if not hasattr(self, u'inttime'): self.calc_mean_intertime() # check if the search speed has been calculated if not hasattr(self, u'searchspd'): self.calc_search_speed() # check if the Q score has been calculated if not hasattr(self, u'qscore'): self.calc_qscore() # check if the standardized angle has been calculated if hasattr(self, u'angle'): if not u'standardized' in self.angle.keys(): self.calc_stand_angle() else: self.calc_stand_angle() # check if the best R has been calculated if not hasattr(self, u'bestr'): self.calc_best_r() # check if the intersection rate has been calculated if not hasattr(self, u'intersections'): self.calc_intersect_rate() # check if the first cancellation has been calculated if not hasattr(self, u'firstcancel'): self.calc_first_cancellation() # open a new textfile self.files[u'txt'] = os.path.join(self.outdir, u'summary.txt') txtfile = open(self.files[u'txt'], 'w') # write the header to the file header = [ u'ppname', u'taskname', u'testdate', u'testtime', \ 
u'om_tot',u'om_left',u'om_right', \ u'revisits_tot',u'revisits_imm', u'revisits_del', \ u'CoC_hor',u'CoC_ver', \ u'duration',u'mean_intertime', u'Qscore', \ u'mean_interdist', u'stand_interdist',u'speed', \ u'mean_angle',u'stand_angle', \ u'bestR',u'hor_R',u'ver_R', \ u'intersect_tot',u'intersect_rate', \ u'first_cancel_x', u'first_cancel_y', u'first_quadrant'] txtfile.write(u"\t".join(header)) txtfile.write(u"\n") # write the output to the file output = [ self.ppname, self.taskname, self.testdate, self.testtime, \ self.omissions[u'total'],self.omissions[u'left'],self.omissions[u'right'], \ self.pers[u'tot'],self.pers[u'imm'],self.pers[u'del'], \ self.coc[u'x'],self.coc[u'y'], \ self.duration['total']/1000, self.inttime[u'mean']/1000.0, self.qscore, \ self.intdist[u'mean'], self.intdist[u'standardized'], self.searchspd, \ self.angle[u'mean'], self.angle[u'standardized'], \ self.bestr[u'best'], self.bestr[u'x'], self.bestr[u'y'], \ self.intersections[u'total'], self.intersections[u'rate'], \ self.firstcancel[u'norm'][0], self.firstcancel[u'norm'][1], self.firstcancel[u'quad'] ] output = map(unicode, output) txtfile.write(u"\t".join(output)) # close the textfile txtfile.close() def summary_pdf(self): """Creates an A4-sized PDF, showing the important plots and all calculated measures""" # A4 dimensions (landscape): # 11.69x8.27 inches, 300 dpi (results in 3507x2481 px) # 11.69x8.27 inches, 600 dpi (results in 7014x4962 px) # CHECKS # check if the omissions have been calculated if not hasattr(self, u'omissions'): self.calc_omissions() # check if the centre of cancellation has been calculated if not hasattr(self, u'coc'): self.calc_centre_of_cancellation() # check if the omissions have been calculated if hasattr(self, u'pers'): if u'tot' not in self.pers.keys(): self.calc_total_revisits() if u'imm' not in self.pers.keys(): self.calc_immediate_revisits() if u'del' not in self.pers.keys(): self.calc_delayed_revisits() else: self.calc_total_revisits() self.calc_immediate_revisits() self.calc_delayed_revisits() # check if the standardized interdistance has been calculated if hasattr(self, u'intdist'): if not u'standardized' in self.intdist.keys(): self.calc_stand_interdist() else: self.calc_stand_interdist() # check if the intertime has been calculated if not hasattr(self, u'inttime'): self.calc_mean_intertime() # check if the search speed has been calculated if not hasattr(self, u'searchspd'): self.calc_search_speed() # check if the Q score has been calculated if not hasattr(self, u'qscore'): self.calc_qscore() # check if the standardized angle has been calculated if hasattr(self, u'angle'): if not u'standardized' in self.angle.keys(): self.calc_stand_angle() else: self.calc_stand_angle() # check if the best R has been calculated if not hasattr(self, u'bestr'): self.calc_best_r() # check if the intersection rate has been calculated if not hasattr(self, u'intersections'): self.calc_intersect_rate() # check if the first cancellation has been calculated if not hasattr(self, u'firstcancel'): self.calc_first_cancellation() # check if the cancellation path has been plotted if not u'cancelpath' in self.files.keys(): self.plot_cancellation_path() # check if the heatmap has been plotted if not u'cancellationtaskheatmap' in self.files.keys(): self.plot_superimposed_heatmap(maptype=u'cancellation') # PLOTTING # create a new figure pdf = pyplot.figure(figsize=self.pdfsize, dpi=self.dpi*6, frameon=False) # draw the images imgnames = [u'cancelpath',u'cancellationtaskheatmap'] axtitles = [u'cancellation 
path',u'cancellation heatmap',u'analysis output'] bottoms = [0.35, 0.03, 0.7] for i in range(len(imgnames)): # add new axis (rect is [left,bottom,width,height]) ax = pyplot.Axes(pdf, [0,bottoms[i],1,0.3]) ax.set_axis_off() pdf.add_axes(ax) # load and draw the image img = image.imread(self.files[imgnames[i]]) ax.imshow(img) # add title ax.set_title(axtitles[i], fontproperties=self.fontprop) # add axis for text ax = pyplot.Axes(pdf, [0,bottoms[-1],1,0.25]) ax.set_axis_off() pdf.add_axes(ax) ax.set_title(axtitles[-1], fontproperties=self.fontprop) # texts (two columns in two lists) texts = [[u" ", u"created using CancellationTools (version %s)" % self.version, u"<url>www.pygaze.org/cancellation", u"<b>participant: %s" % self.ppname, u"task: %s (%s, %s)" % (self.taskname,self.testdate,self.testtime), u"<b>neglect measures", u"omissions: %d (left: %d, right: %d)" % (self.omissions[u'total'],self.omissions[u'left'],self.omissions[u'right']), u"centre of cancellation: %1.2f (vertical: %1.2f)" % (self.coc[u'x'],self.coc[u'y']), u"<b>revisits", u"immediate: %d" % self.pers[u'imm'], u"delayed: %d" % self.pers[u'del']], [ u" ", u"<b>timing", u"duration: %s" % (self.duration['string']), u"average inter-cancellation time: %.2f s" % (self.inttime[u'mean']/1000.0), u"search speed: %.2f cancellations per second" % (self.searchspd), u"Q score: %.2f" % (self.qscore), u"<b>path measures", u"distance; mean: %d px, standardized: %d" % (self.intdist[u'mean'],self.intdist[u'standardized']), u"standardized angle: %1.2f" % self.angle[u'standardized'], u"best R: %1.2f" % self.bestr[u'best'], u"intersections rate: %.2f (total: %d)" % (self.intersections[u'rate'],self.intersections[u'total'])] ] # draw texts for c in range(len(texts)): for r in range(len(texts[c])): if u"<b>" in texts[c][r]: ax.text(0.1+0.5*c, 1-(0.1*r), texts[c][r].replace(u"<b>",u""), fontsize=10, fontproperties=self.boldfontprop) elif u"<url>" in texts[c][r]: ax.text(0.1+0.5*c, 1-(0.1*r), texts[c][r].replace(u"<url>",u""), color=self.colours[u'skyblue'][2], fontsize=10, fontproperties=self.fontprop) else: ax.text(0.1+0.5*c, 1-(0.1*r), texts[c][r], fontsize=10, fontproperties=self.fontprop) # save PDF self.files[u'pdf'] = os.path.join(self.outdir, u'summary.pdf') pdf.savefig(self.files[u'pdf'])
esdalmaijer/CancellationTools
libcancellation/libanalysis.py
Python
gpl-3.0
50,955
[ "Gaussian" ]
595d3111c50633c8e8056fa38e3271bef61cc9155ff586bdaba32ae45e6313a4
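The revisit measures in the analysis code above hinge on one numpy idiom: once clicks have been snapped to target coordinates, an immediate revisit shows up as a zero in the diff of both the x and the y series, and numpy.intersect1d picks out the positions where both are zero; delayed revisits are whatever remains of the total revisit count. A minimal standalone sketch of that idea, using made-up coordinates rather than the CancellationTools class:

import numpy

# Hypothetical target-snapped cancellation coordinates; the repeated
# (40, 60) pair is an immediate revisit, the second (10, 20) a delayed one.
ctx = numpy.array([10, 40, 40, 80, 10])
cty = numpy.array([20, 60, 60, 30, 20])

# Immediate revisits: x AND y unchanged between consecutive cancellations.
px = numpy.where(numpy.diff(ctx) == 0)[0]
py = numpy.where(numpy.diff(cty) == 0)[0]
imm = len(numpy.intersect1d(px, py))             # 1

# Total revisits: every extra occurrence of an already-cancelled target.
cors = list(zip(ctx, cty))
tot = sum(cors.count(c) - 1 for c in set(cors))  # 2

print(imm, tot - imm)                            # 1 immediate, 1 delayed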
# -*- mode: python; coding: utf-8 -*- """ FireLogger_ server-side support library for Python. For usage see ``README.txt`` or visit the `github homepage`_. .. _FireLogger: https://addons.mozilla.org/en-US/firefox/addon/11090 .. _github homepage: http://github.com/darwin/firepython """ __api_version__ = '0.8' # ^--- corresponds to api version of firelogger __version__ = '0.7.0' # for python package releases
arcticio/ice-bloc-hdr
utils/external/firepython/__init__.py
Python
mit
414
[ "VisIt" ]
a60e53ea000481a3b43379a1eabfa762b2061167e01ac1622d044f54cfbeab7a
# -*- coding: utf-8 -*- # # pulsepacket.py # # This file is part of NEST. # # Copyright (C) 2004 The NEST Initiative # # NEST is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # NEST is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with NEST. If not, see <http://www.gnu.org/licenses/>. """ Pulse packet example -------------------- This script compares the average and individual membrane potential excursions in response to a single pulse packet with an analytically acquired voltage trace (see: Diesmann [1]_) A pulse packet is a transient spike volley with a Gaussian rate profile. The user can specify the neural parameters, the parameters of the pulse-packet and the number of trials. References ~~~~~~~~~~ .. [1] Diesmann M. 2002. Dissertation. Conditions for stable propagation of synchronous spiking in cortical neural networks: Single neuron dynamics and network properties. http://d-nb.info/968772781/34. """ ############################################################################### # First, we import all necessary modules for simulation, analysis and # plotting. import scipy.special as sp import nest import numpy import matplotlib.pyplot as plt # Properties of pulse packet: a = 100 # number of spikes in one pulse packet sdev = 10. # width of pulse packet (ms) weight = 0.1 # PSP amplitude (mV) pulsetime = 500. # occurrence time (center) of pulse-packet (ms) # Network and neuron characteristics: n_neurons = 100 # number of neurons cm = 200. # membrane capacitance (pF) tau_s = 0.5 # synaptic time constant (ms) tau_m = 20. # membrane time constant (ms) V0 = 0.0 # resting potential (mV) Vth = numpy.inf # firing threshold, high value to avoid spiking # Simulation and analysis parameters: simtime = 1000. # how long we simulate (ms) simulation_resolution = 0.1 # (ms) sampling_resolution = 1. # for voltmeter (ms) convolution_resolution = 1. # for the analytics (ms) # Some parameters in base units. Cm = cm * 1e-12 # convert to Farad Weight = weight * 1e-12 # convert to Ampere Tau_s = tau_s * 1e-3 # convert to sec Tau_m = tau_m * 1e-3 # convert to sec Sdev = sdev * 1e-3 # convert to sec Convolution_resolution = convolution_resolution * 1e-3 # convert to sec ############################################################################### # This function calculates the membrane potential excursion in response # to a single input spike (the equation is given for example in Diesmann [1]_, # eq.2.3). 
# It expects: # # * ``Time``: a time array or a single time point (in sec) # * ``Tau_s`` and ``Tau_m``: the synaptic and the membrane time constant (in sec) # * ``Cm``: the membrane capacity (in Farad) # * ``Weight``: the synaptic weight (in Ampere) # # It returns the provoked membrane potential (in mV) def make_psp(Time, Tau_s, Tau_m, Cm, Weight): term1 = (1 / Tau_s - 1 / Tau_m) term2 = numpy.exp(-Time / Tau_s) term3 = numpy.exp(-Time / Tau_m) PSP = (Weight / Cm * numpy.exp(1) / Tau_s * (((-Time * term2) / term1) + (term3 - term2) / term1 ** 2)) return PSP * 1e3 ############################################################################### # This function finds the exact location of the maximum of the PSP caused by a # single input spike. The location is obtained by setting the first derivative # of the equation for the PSP (see ``make_psp()``) to zero. The resulting # equation can be expressed in terms of a `LambertW function`. # This function expects: # # * ``Tau_s`` and ``Tau_m``: the synaptic and membrane time constant (in sec) # # It returns the location of the maximum (in sec) def LambertWm1(x): # Using scipy to mimic the gsl_sf_lambert_Wm1 function. return sp.lambertw(x, k=-1 if x < 0 else 0).real def find_loc_pspmax(tau_s, tau_m): var = tau_m / tau_s lam = LambertWm1(-numpy.exp(-1 / var) / var) t_maxpsp = (-var * lam - 1) / var / (1 / tau_s - 1 / tau_m) * 1e-3 return t_maxpsp ############################################################################### # First, we construct a Gaussian kernel for a given standard derivation # (``sig``) and mean value (``mu``). In this case the standard derivation is # the width of the pulse packet (see [1]_). sig = Sdev mu = 0.0 x = numpy.arange(-4 * sig, 4 * sig, Convolution_resolution) term1 = 1 / (sig * numpy.sqrt(2 * numpy.pi)) term2 = numpy.exp(-(x - mu)**2 / (sig**2 * 2)) gauss = term1 * term2 * Convolution_resolution ############################################################################### # Second, we calculate the PSP of a neuron due to a single spiking input. # (see Diesmann 2002, eq. 2.3). # Since we do that in discrete time steps, we first construct an array # (``t_psp``) that contains the time points we want to consider. Then, the # function ``make_psp()`` (that creates the PSP) takes the time array as its # first argument. t_psp = numpy.arange(0, 10 * (Tau_m + Tau_s), Convolution_resolution) psp = make_psp(t_psp, Tau_s, Tau_m, Cm, Weight) ############################################################################### # Now, we want to normalize the PSP amplitude to one. We therefore have to # divide the PSP by its maximum ([1]_ sec 6.1). The function # ``find_loc_pspmax()`` returns the exact time point (``t_pspmax``) when we # expect the maximum to occur. The function ``make_psp()`` calculates the # corresponding PSP value, which is our PSP amplitude (``psp_amp``). t_pspmax = find_loc_pspmax(Tau_s, Tau_m) psp_amp = make_psp(t_pspmax, Tau_s, Tau_m, Cm, Weight) psp_norm = psp / psp_amp ############################################################################### # Now we have all ingredients to compute the membrane potential excursion # (`U`). This calculation implies a convolution of the Gaussian with the # normalized PSP (see [1]_, eq. 6.9). In order to avoid an offset in the # convolution, we need to add a pad of zeros on the left side of the # normalized PSP. Later on we want to compare our analytical results with the # simulation outcome. 
Therefore we need a time vector (`t_U`) with the correct # temporal resolution, which places the excursion of the potential at the # correct time. psp_norm = numpy.pad(psp_norm, [len(psp_norm) - 1, 1], mode='constant') U = a * psp_amp * numpy.convolve(gauss, psp_norm) ulen = len(U) t_U = (convolution_resolution * numpy.linspace(-ulen / 2., ulen / 2., ulen) + pulsetime + 1.) ############################################################################### # In this section we simulate a network of multiple neurons. # All these neurons receive an individual pulse packet that is drawn from a # Gaussian distribution. # # We reset the Kernel, define the simulation resolution and set the # verbosity using ``set_verbosity`` to suppress info messages. nest.ResetKernel() nest.set_verbosity("M_WARNING") nest.resolution = simulation_resolution ############################################################################### # Afterwards we create several neurons, the same amount of # pulse-packet-generators and a voltmeter. All these nodes/devices # have specific properties that are specified in device specific # dictionaries (here: `neuron_pars` for the neurons, `ppg_pars` # for the and pulse-packet-generators and `vm_pars` for the voltmeter). neuron_pars = { 'V_th': Vth, 'tau_m': tau_m, 'tau_syn_ex': tau_s, 'C_m': cm, 'E_L': V0, 'V_reset': V0, 'V_m': V0 } neurons = nest.Create('iaf_psc_alpha', n_neurons, neuron_pars) ppg_pars = { 'pulse_times': [pulsetime], 'activity': a, 'sdev': sdev } ppgs = nest.Create('pulsepacket_generator', n_neurons, ppg_pars) vm_pars = {'interval': sampling_resolution} vm = nest.Create('voltmeter', params=vm_pars) ############################################################################### # Now, we connect each pulse generator to one neuron via static synapses. # We use the default static synapse, with specified weight. # The command ``Connect`` connects all kinds of nodes/devices. Since multiple # nodes/devices can be connected in different ways e.g., each source connects # to all targets, each source connects to a subset of targets or each source # connects to exactly one target, we have to specify the connection. In our # case we use the ``one_to_one`` connection routine since we connect one pulse # generator (source) to one neuron (target). # In addition we also connect the `voltmeter` to the `neurons`. nest.Connect(ppgs, neurons, 'one_to_one', syn_spec={'weight': weight}) nest.Connect(vm, neurons, syn_spec={'weight': weight}) ############################################################################### # In the next step we run the simulation for a given duration in ms. nest.Simulate(simtime) ############################################################################### # Finally, we record the membrane potential, when it occurred and to which # neuron it belongs. The sender and the time point of a voltage # data point at position x in the voltage array (``V_m``), can be found at the # same position x in the sender (`senders`) and the time array (`times`). Vm = vm.get('events', 'V_m') times = vm.get('events', 'times') senders = vm.get('events', 'senders') ############################################################################### # Here we plot the membrane potential derived from the theory and from the # simulation. Since we simulate multiple neurons that received slightly # different pulse packets, we plot the individual and the averaged membrane # potentials. 
# # We plot the analytical solution U (the resting potential V0 shifts the # membrane potential up or downwards). plt.plot(t_U, U + V0, 'r', lw=2, zorder=3, label='analytical solution') ############################################################################### # Then we plot all individual membrane potentials. # The time axes is the range of the simulation time in steps of ms. Vm_single = [Vm[senders == n.global_id] for n in neurons] simtimes = numpy.arange(1, simtime) for idn in range(n_neurons): if idn == 0: plt.plot(simtimes, Vm_single[idn], 'gray', zorder=1, label='single potentials') else: plt.plot(simtimes, Vm_single[idn], 'gray', zorder=1) ############################################################################### # Finally, we plot the averaged membrane potential. Vm_average = numpy.mean(Vm_single, axis=0) plt.plot(simtimes, Vm_average, 'b', lw=4, zorder=2, label='averaged potential') plt.legend() plt.xlabel('time (ms)') plt.ylabel('membrane potential (mV)') plt.xlim((-5 * (tau_m + tau_s) + pulsetime, 10 * (tau_m + tau_s) + pulsetime)) plt.show()
niltonlk/nest-simulator
pynest/examples/pulsepacket.py
Python
gpl-2.0
11,242
[ "Gaussian", "NEURON" ]
146da2b9fbbdb44fe1e55c039368275e178d89f688102cc437d864ecc6c37042
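The analytic step in the pulse packet script, locating the PSP peak through the Lambert W function, is easy to sanity-check numerically. The following self-contained sketch assumes only numpy and scipy, reuses the script's default time constants, and works in seconds throughout (so the unit conversion performed inside find_loc_pspmax() is not needed here); the Lambert-W location and a brute-force argmax of the PSP should agree to within the grid resolution:

import numpy
import scipy.special as sp

# Default values from the script above, already converted to SI units.
Tau_s, Tau_m = 0.5e-3, 20e-3   # synaptic / membrane time constant (s)
Cm, Weight = 200e-12, 0.1e-12  # capacitance (F), synaptic weight (A)

def make_psp(Time, Tau_s, Tau_m, Cm, Weight):
    # Same expression as in the script (Diesmann 2002, eq. 2.3), in mV.
    term1 = (1 / Tau_s - 1 / Tau_m)
    term2 = numpy.exp(-Time / Tau_s)
    term3 = numpy.exp(-Time / Tau_m)
    return (Weight / Cm * numpy.exp(1) / Tau_s *
            (((-Time * term2) / term1) + (term3 - term2) / term1 ** 2)) * 1e3

# Numerical peak location on a fine grid.
t = numpy.arange(0, 10 * (Tau_m + Tau_s), 1e-6)
t_num = t[numpy.argmax(make_psp(t, Tau_s, Tau_m, Cm, Weight))]

# Analytical peak location from the W_-1 branch of the Lambert W function.
var = Tau_m / Tau_s
lam = sp.lambertw(-numpy.exp(-1 / var) / var, k=-1).real
t_ana = (-var * lam - 1) / var / (1 / Tau_s - 1 / Tau_m)

print(t_num, t_ana)  # both approximately 2.76e-3 s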
""" This module loads all the classes from the VTK Infovis library into its namespace. This is an optional module.""" from vtkInfovisPython import *
spthaolt/VTK
Wrapping/Python/vtk/infovis.py
Python
bsd-3-clause
151
[ "VTK" ]
edf0660d2b88780b5fa017507f623dc514d54a7d0142e17ba6ea6cdfe8f209f6
#!/usr/bin/env python # -*- coding: iso-8859-1 -*- # Documentation is intended to be processed by Epydoc. """ Introduction ============ The Munkres module provides an implementation of the Munkres algorithm (also called the Hungarian algorithm or the Kuhn-Munkres algorithm), useful for solving the Assignment Problem. Assignment Problem ================== Let *C* be an *n*\ x\ *n* matrix representing the costs of each of *n* workers to perform any of *n* jobs. The assignment problem is to assign jobs to workers in a way that minimizes the total cost. Since each worker can perform only one job and each job can be assigned to only one worker the assignments represent an independent set of the matrix *C*. One way to generate the optimal set is to create all permutations of the indexes necessary to traverse the matrix so that no row and column are used more than once. For instance, given this matrix (expressed in Python): .. python:: matrix = [[5, 9, 1], [10, 3, 2], [8, 7, 4]] You could use this code to generate the traversal indexes: .. python:: def permute(a, results): if len(a) == 1: results.insert(len(results), a) else: for i in range(0, len(a)): element = a[i] a_copy = [a[j] for j in range(0, len(a)) if j != i] subresults = [] permute(a_copy, subresults) for subresult in subresults: result = [element] + subresult results.insert(len(results), result) results = [] permute(range(len(matrix)), results) # [0, 1, 2] for a 3x3 matrix After the call to permute(), the results matrix would look like this:: [[0, 1, 2], [0, 2, 1], [1, 0, 2], [1, 2, 0], [2, 0, 1], [2, 1, 0]] You could then use that index matrix to loop over the original cost matrix and calculate the smallest cost of the combinations: .. python:: n = len(matrix) minval = sys.maxint for row in range(n): cost = 0 for col in range(n): cost += matrix[row][col] minval = min(cost, minval) print minval While this approach works fine for small matrices, it does not scale. It executes in O(*n*!) time: Calculating the permutations for an *n*\ x\ *n* matrix requires *n*! operations. For a 12x12 matrix, that's 479,001,600 traversals. Even if you could manage to perform each traversal in just one millisecond, it would still take more than 133 hours to perform the entire traversal. A 20x20 matrix would take 2,432,902,008,176,640,000 operations. At an optimistic millisecond per operation, that's more than 77 million years. The Munkres algorithm runs in O(*n*\ ^3) time, rather than O(*n*!). This package provides an implementation of that algorithm. This version is based on http://www.public.iastate.edu/~ddoty/HungarianAlgorithm.html. This version was written for Python by Brian Clapper from the (Ada) algorithm at the above web site. (The ``Algorithm::Munkres`` Perl version, in CPAN, was clearly adapted from the same web site.) Usage ===== Construct a Munkres object: .. python:: from munkres import Munkres m = Munkres() Then use it to compute the lowest cost assignment from a cost matrix. Here's a sample program: .. 
python:: from munkres import Munkres, print_matrix matrix = [[5, 9, 1], [10, 3, 2], [8, 7, 4]] m = Munkres() indexes = m.compute(matrix) print_matrix('Lowest cost through this matrix:', matrix) total = 0 for row, column in indexes: value = matrix[row][column] total += value print '(%d, %d) -> %d' % (row, column, value) print 'total cost: %d' % total Running that program produces:: Lowest cost through this matrix: [5, 9, 1] [10, 3, 2] [8, 7, 4] (0, 0) -> 5 (1, 1) -> 3 (2, 2) -> 4 total cost=12 The instantiated Munkres object can be used multiple times on different matrices. Non-square Cost Matrices ======================== The Munkres algorithm assumes that the cost matrix is square. However, it's possible to use a rectangular matrix if you first pad it with 0 values to make it square. This module automatically pads rectangular cost matrices to make them square. Notes: - The module operates on a *copy* of the caller's matrix, so any padding will not be seen by the caller. - The cost matrix must be rectangular or square. An irregular matrix will *not* work. Calculating Profit, Rather than Cost ==================================== The cost matrix is just that: A cost matrix. The Munkres algorithm finds the combination of elements (one from each row and column) that results in the smallest cost. It's also possible to use the algorithm to maximize profit. To do that, however, you have to convert your profit matrix to a cost matrix. The simplest way to do that is to subtract all elements from a large value. For example: .. python:: from munkres import Munkres, print_matrix matrix = [[5, 9, 1], [10, 3, 2], [8, 7, 4]] cost_matrix = [] for row in matrix: cost_row = [] for col in row: cost_row += [sys.maxint - col] cost_matrix += [cost_row] m = Munkres() indexes = m.compute(cost_matrix) print_matrix('Lowest cost through this matrix:', matrix) total = 0 for row, column in indexes: value = matrix[row][column] total += value print '(%d, %d) -> %d' % (row, column, value) print 'total profit=%d' % total Running that program produces:: Highest profit through this matrix: [5, 9, 1] [10, 3, 2] [8, 7, 4] (0, 1) -> 9 (1, 0) -> 10 (2, 2) -> 4 total profit=23 The ``munkres`` module provides a convenience method for creating a cost matrix from a profit matrix. Since it doesn't know whether the matrix contains floating point numbers, decimals, or integers, you have to provide the conversion function; but the convenience method takes care of the actual creation of the cost matrix: .. python:: import munkres cost_matrix = munkres.make_cost_matrix(matrix, lambda cost: sys.maxint - cost) So, the above profit-calculation program can be recast as: .. python:: from munkres import Munkres, print_matrix, make_cost_matrix matrix = [[5, 9, 1], [10, 3, 2], [8, 7, 4]] cost_matrix = make_cost_matrix(matrix, lambda cost: sys.maxint - cost) m = Munkres() indexes = m.compute(cost_matrix) print_matrix('Lowest cost through this matrix:', matrix) total = 0 for row, column in indexes: value = matrix[row][column] total += value print '(%d, %d) -> %d' % (row, column, value) print 'total profit=%d' % total References ========== 1. http://www.public.iastate.edu/~ddoty/HungarianAlgorithm.html 2. Harold W. Kuhn. The Hungarian Method for the assignment problem. *Naval Research Logistics Quarterly*, 2:83-97, 1955. 3. Harold W. Kuhn. Variants of the Hungarian method for assignment problems. *Naval Research Logistics Quarterly*, 3: 253-258, 1956. 4. Munkres, J. Algorithms for the Assignment and Transportation Problems. 
*Journal of the Society of Industrial and Applied Mathematics*, 5(1):32-38, March, 1957. 5. http://en.wikipedia.org/wiki/Hungarian_algorithm Copyright and License ===================== Copyright © 2008 Brian M. Clapper This is free software, released under the following BSD-like license: Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. The end-user documentation included with the redistribution, if any, must include the following acknowlegement: This product includes software developed by Brian M. Clapper (bmc@clapper.org, http://www.clapper.org/bmc/). That software is copyright (c) 2008 Brian M. Clapper. Alternately, this acknowlegement may appear in the software itself, if and wherever such third-party acknowlegements normally appear. THIS SOFTWARE IS PROVIDED **AS IS**, AND ANY EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL BRIAN M. CLAPPER BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ``$Id: munkres.py 1269 2010-03-10 05:03:59Z emarsi $`` """ __docformat__ = 'restructuredtext' # --------------------------------------------------------------------------- # Imports # --------------------------------------------------------------------------- import sys # --------------------------------------------------------------------------- # Exports # --------------------------------------------------------------------------- __all__ = ['Munkres', 'make_cost_matrix'] # --------------------------------------------------------------------------- # Globals # --------------------------------------------------------------------------- # Info about the module __version__ = "1.0.5.2" __author__ = "Brian Clapper, bmc@clapper.org" __url__ = "http://www.clapper.org/software/python/munkres/" __copyright__ = "(c) 2008 Brian M. Clapper" __license__ = "BSD-style license" # --------------------------------------------------------------------------- # Classes # --------------------------------------------------------------------------- class Munkres: """ Calculate the Munkres solution to the classical assignment problem. See the module documentation for usage. """ def __init__(self): """Create a new instance""" self.C = None self.row_covered = [] self.col_covered = [] self.n = 0 self.Z0_r = 0 self.Z0_c = 0 self.marked = None self.path = None def make_cost_matrix(profit_matrix, inversion_function): """ **DEPRECATED** Please use the module function ``make_cost_matrix()``. """ import munkres return munkres.make_cost_matrix(profit_matrix, inversion_function) make_cost_matrix = staticmethod(make_cost_matrix) def pad_matrix(self, matrix, pad_value=0): """ Pad a possibly non-square matrix to make it square. 
:Parameters: matrix : list of lists matrix to pad pad_value : int value to use to pad the matrix :rtype: list of lists :return: a new, possibly padded, matrix """ max_columns = 0 total_rows = len(matrix) for row in matrix: max_columns = max(max_columns, len(row)) total_rows = max(max_columns, total_rows) new_matrix = [] for row in matrix: row_len = len(row) new_row = row[:] if total_rows > row_len: # Row too short. Pad it. new_row += [0] * (total_rows - row_len) new_matrix += [new_row] while len(new_matrix) < total_rows: new_matrix += [[0] * total_rows] return new_matrix def compute(self, cost_matrix): """ Compute the indexes for the lowest-cost pairings between rows and columns in the database. Returns a list of (row, column) tuples that can be used to traverse the matrix. :Parameters: cost_matrix : list of lists The cost matrix. If this cost matrix is not square, it will be padded with zeros, via a call to ``pad_matrix()``. (This method does *not* modify the caller's matrix. It operates on a copy of the matrix.) **WARNING**: This code handles square and rectangular matrices. It does *not* handle irregular matrices. :rtype: list :return: A list of ``(row, column)`` tuples that describe the lowest cost path through the matrix """ self.C = self.pad_matrix(cost_matrix) self.n = len(self.C) self.original_length = len(cost_matrix) self.original_width = len(cost_matrix[0]) self.row_covered = [False for i in range(self.n)] self.col_covered = [False for i in range(self.n)] self.Z0_r = 0 self.Z0_c = 0 self.path = self.__make_matrix(self.n * 2, 0) self.marked = self.__make_matrix(self.n, 0) done = False step = 1 steps = { 1 : self.__step1, 2 : self.__step2, 3 : self.__step3, 4 : self.__step4, 5 : self.__step5, 6 : self.__step6 } while not done: try: func = steps[step] step = func() except KeyError: done = True # Look for the starred columns results = [] for i in range(self.original_length): for j in range(self.original_width): if self.marked[i][j] == 1: results += [(i, j)] return results def __copy_matrix(self, matrix): """Return an exact copy of the supplied matrix""" return copy.deepcopy(matrix) def __make_matrix(self, n, val): """Create an *n*x*n* matrix, populating it with the specific value.""" matrix = [] for i in range(n): matrix += [[val for j in range(n)]] return matrix def __step1(self): """ For each row of the matrix, find the smallest element and subtract it from every element in its row. Go to Step 2. """ C = self.C n = self.n for i in range(n): minval = min(self.C[i]) # Find the minimum value for this row and subtract that minimum # from every element in the row. for j in range(n): self.C[i][j] -= minval return 2 def __step2(self): """ Find a zero (Z) in the resulting matrix. If there is no starred zero in its row or column, star Z. Repeat for each element in the matrix. Go to Step 3. """ n = self.n for i in range(n): for j in range(n): if (self.C[i][j] == 0) and \ (not self.col_covered[j]) and \ (not self.row_covered[i]): self.marked[i][j] = 1 self.col_covered[j] = True self.row_covered[i] = True self.__clear_covers() return 3 def __step3(self): """ Cover each column containing a starred zero. If K columns are covered, the starred zeros describe a complete set of unique assignments. In this case, Go to DONE, otherwise, Go to Step 4. """ n = self.n count = 0 for i in range(n): for j in range(n): if self.marked[i][j] == 1: self.col_covered[j] = True count += 1 if count >= n: step = 7 # done else: step = 4 return step def __step4(self): """ Find a noncovered zero and prime it. 
If there is no starred zero in the row containing this primed zero, Go to Step 5. Otherwise, cover this row and uncover the column containing the starred zero. Continue in this manner until there are no uncovered zeros left. Save the smallest uncovered value and Go to Step 6. """ step = 0 done = False row = -1 col = -1 star_col = -1 while not done: (row, col) = self.__find_a_zero() if row < 0: done = True step = 6 else: self.marked[row][col] = 2 star_col = self.__find_star_in_row(row) if star_col >= 0: col = star_col self.row_covered[row] = True self.col_covered[col] = False else: done = True self.Z0_r = row self.Z0_c = col step = 5 return step def __step5(self): """ Construct a series of alternating primed and starred zeros as follows. Let Z0 represent the uncovered primed zero found in Step 4. Let Z1 denote the starred zero in the column of Z0 (if any). Let Z2 denote the primed zero in the row of Z1 (there will always be one). Continue until the series terminates at a primed zero that has no starred zero in its column. Unstar each starred zero of the series, star each primed zero of the series, erase all primes and uncover every line in the matrix. Return to Step 3 """ count = 0 path = self.path path[count][0] = self.Z0_r path[count][1] = self.Z0_c done = False while not done: row = self.__find_star_in_col(path[count][1]) if row >= 0: count += 1 path[count][0] = row path[count][1] = path[count-1][1] else: done = True if not done: col = self.__find_prime_in_row(path[count][0]) count += 1 path[count][0] = path[count-1][0] path[count][1] = col self.__convert_path(path, count) self.__clear_covers() self.__erase_primes() return 3 def __step6(self): """ Add the value found in Step 4 to every element of each covered row, and subtract it from every element of each uncovered column. Return to Step 4 without altering any stars, primes, or covered lines. """ minval = self.__find_smallest() for i in range(self.n): for j in range(self.n): if self.row_covered[i]: self.C[i][j] += minval if not self.col_covered[j]: self.C[i][j] -= minval return 4 def __find_smallest(self): """Find the smallest uncovered value in the matrix.""" minval = sys.maxint for i in range(self.n): for j in range(self.n): if (not self.row_covered[i]) and (not self.col_covered[j]): if minval > self.C[i][j]: minval = self.C[i][j] return minval def __find_a_zero(self): """Find the first uncovered element with value 0""" row = -1 col = -1 i = 0 n = self.n done = False while not done: j = 0 while True: if (self.C[i][j] == 0) and \ (not self.row_covered[i]) and \ (not self.col_covered[j]): row = i col = j done = True j += 1 if j >= n: break i += 1 if i >= n: done = True return (row, col) def __find_star_in_row(self, row): """ Find the first starred element in the specified row. Returns the column index, or -1 if no starred element was found. """ col = -1 for j in range(self.n): if self.marked[row][j] == 1: col = j break return col def __find_star_in_col(self, col): """ Find the first starred element in the specified row. Returns the row index, or -1 if no starred element was found. """ row = -1 for i in range(self.n): if self.marked[i][col] == 1: row = i break return row def __find_prime_in_row(self, row): """ Find the first prime element in the specified row. Returns the column index, or -1 if no starred element was found. 
""" col = -1 for j in range(self.n): if self.marked[row][j] == 2: col = j break return col def __convert_path(self, path, count): for i in range(count+1): if self.marked[path[i][0]][path[i][1]] == 1: self.marked[path[i][0]][path[i][1]] = 0 else: self.marked[path[i][0]][path[i][1]] = 1 def __clear_covers(self): """Clear all covered matrix cells""" for i in range(self.n): self.row_covered[i] = False self.col_covered[i] = False def __erase_primes(self): """Erase all prime markings""" for i in range(self.n): for j in range(self.n): if self.marked[i][j] == 2: self.marked[i][j] = 0 # --------------------------------------------------------------------------- # Functions # --------------------------------------------------------------------------- def make_cost_matrix(profit_matrix, inversion_function): """ Create a cost matrix from a profit matrix by calling 'inversion_function' to invert each value. The inversion function must take one numeric argument (of any type) and return another numeric argument which is presumed to be the cost inverse of the original profit. This is a static method. Call it like this: .. python:: cost_matrix = Munkres.make_cost_matrix(matrix, inversion_func) For example: .. python:: cost_matrix = Munkres.make_cost_matrix(matrix, lambda x : sys.maxint - x) :Parameters: profit_matrix : list of lists The matrix to convert from a profit to a cost matrix inversion_function : function The function to use to invert each entry in the profit matrix :rtype: list of lists :return: The converted matrix """ cost_matrix = [] for row in profit_matrix: cost_matrix.append([inversion_function(value) for value in row]) return cost_matrix def print_matrix(matrix, msg=None): """ Convenience function: Displays the contents of a matrix of integers. :Parameters: matrix : list of lists Matrix to print msg : str Optional message to print before displaying the matrix """ import math if msg is not None: print msg # Calculate the appropriate format width. width = 0 for row in matrix: for val in row: width = max(width, int(math.log10(val)) + 1) # Make the format string format = '%%%dd' % width # Print the matrix for row in matrix: sep = '[' for val in row: sys.stdout.write(sep + format % val) sep = ', ' sys.stdout.write(']\n') # --------------------------------------------------------------------------- # Main # --------------------------------------------------------------------------- if __name__ == '__main__': matrices = [ # Square ([[400, 150, 400], [400, 450, 600], [300, 225, 300]], 850 # expected cost ), # Rectangular variant ([[400, 150, 400, 1], [400, 450, 600, 2], [300, 225, 300, 3]], 452 # expected cost ), # Square ([[10, 10, 8], [ 9, 8, 1], [ 9, 7, 4]], 18 ), # Rectangular variant ([[10, 10, 8, 11], [ 9, 8, 1, 1], [ 9, 7, 4, 10]], 15 ), ] m = Munkres() for cost_matrix, expected_total in matrices: print_matrix(cost_matrix, msg='cost matrix') indexes = m.compute(cost_matrix) total_cost = 0 for r, c in indexes: x = cost_matrix[r][c] total_cost += x print '(%d, %d) -> %d' % (r, c, x) print 'lowest cost=%d' % total_cost assert expected_total == total_cost
emsrc/daeso-framework
lib/daeso/thirdparty/munkres.py
Python
gpl-3.0
25,007
[ "Brian" ]
970e0c518005d9fccae97e9d8e519127a7654862346147e9957ae489d3d22d78
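The munkres docstring above motivates the algorithm by contrasting it with brute-force enumeration of all n! assignments, but the small cost-summing snippet it shows never actually uses the generated permutation lists. A corrected, standalone version of that brute-force idea (illustrative only, not part of the module) for the same 3x3 example:

import sys
from itertools import permutations

matrix = [[5, 9, 1],
          [10, 3, 2],
          [8, 7, 4]]

n = len(matrix)
minval = sys.maxsize
for perm in permutations(range(n)):
    # perm[row] is the column assigned to this row; no row or column repeats.
    cost = sum(matrix[row][perm[row]] for row in range(n))
    minval = min(minval, cost)

print(minval)  # 12, the same total the docstring's Munkres usage example reports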
import re import requests from datetime import date from .constants import DEFAULT_PAGE_SIZE, MYOB_BASE_URL from .endpoints import CRUD, METHOD_MAPPING, METHOD_ORDER from .exceptions import ( MyobBadRequest, MyobExceptionUnknown, MyobForbidden, MyobGatewayTimeout, MyobNotFound, MyobRateLimitExceeded, MyobUnauthorized, ) class Manager: def __init__(self, name, credentials, company_id=None, endpoints=[], raw_endpoints=[]): self.credentials = credentials self.name = '_'.join(p for p in name.rstrip('/').split('/') if '[' not in p) self.base_url = MYOB_BASE_URL if company_id is not None: self.base_url += company_id + '/' if name: self.base_url += name self.method_details = {} self.company_id = company_id # Build ORM methods from given url endpoints. for method, base, name in endpoints: if method == CRUD: for m in METHOD_ORDER: self.build_method( m, METHOD_MAPPING[m]['endpoint'](base), METHOD_MAPPING[m]['hint'](name), ) else: self.build_method( method, METHOD_MAPPING[method]['endpoint'](base), METHOD_MAPPING[method]['hint'](name), ) # Build raw methods (ones where we don't want to tinker with the endpoint or hint) for method, endpoint, hint in raw_endpoints: self.build_method(method, endpoint, hint) def build_method(self, method, endpoint, hint): full_endpoint = self.base_url + endpoint url_keys = re.findall(r'\[([^\]]*)\]', full_endpoint) template = full_endpoint.replace('[', '{').replace(']', '}') required_kwargs = url_keys.copy() if method in ('PUT', 'POST'): required_kwargs.append('data') def inner(*args, timeout=None, **kwargs): if args: raise AttributeError("Unnamed args provided. Only keyword args accepted.") # Ensure all required url kwargs have been provided. missing_kwargs = set(required_kwargs) - set(kwargs.keys()) if missing_kwargs: raise KeyError("Missing kwargs %s. Endpoint requires %s." % ( list(missing_kwargs), required_kwargs )) # Parse kwargs. url_kwargs = {} request_kwargs_raw = {} for k, v in kwargs.items(): if k in url_keys: url_kwargs[k] = v elif k != 'data': request_kwargs_raw[k] = v # Determine request method. request_method = 'GET' if method == 'ALL' else method # Build url. url = template.format(**url_kwargs) # Build request kwargs (header/query/body) request_kwargs = self.build_request_kwargs(request_method, data=kwargs.get('data'), **request_kwargs_raw) response = requests.request(request_method, url, timeout=timeout, **request_kwargs) if response.status_code == 200: # We don't want to be deserialising binary responses.. if not response.headers.get('content-type', '').startswith('application/json'): return response.content return response.json() elif response.status_code == 201: return response.json() elif response.status_code == 400: raise MyobBadRequest(response) elif response.status_code == 401: raise MyobUnauthorized(response) elif response.status_code == 403: if response.json()['Errors'][0]['Name'] == 'RateLimitError': raise MyobRateLimitExceeded(response) raise MyobForbidden(response) elif response.status_code == 404: raise MyobNotFound(response) elif response.status_code == 504: raise MyobGatewayTimeout(response) else: raise MyobExceptionUnknown(response) # Build method name method_name = '_'.join(p for p in endpoint.rstrip('/').split('/') if '[' not in p).lower() # If it has no name, use method. if not method_name: method_name = method.lower() # If it already exists, prepend with method to disambiguate. 
elif hasattr(self, method_name): method_name = '%s_%s' % (method.lower(), method_name) self.method_details[method_name] = { 'kwargs': required_kwargs, 'hint': hint, } setattr(self, method_name, inner) def build_request_kwargs(self, method, data=None, **kwargs): request_kwargs = {} # Build headers. if self.company_id: try: companyfile_credentials = self.credentials.companyfile_credentials[self.company_id] except KeyError: raise KeyError('There are no stored username-password credentials for this company id.') else: companyfile_credentials = '' request_kwargs['headers'] = { 'Authorization': 'Bearer %s' % self.credentials.oauth_token, 'x-myobapi-cftoken': companyfile_credentials, 'x-myobapi-key': self.credentials.consumer_key, 'x-myobapi-version': 'v2', } if 'headers' in kwargs: request_kwargs['headers'].update(kwargs['headers']) # Build query. request_kwargs['params'] = {} filters = [] def build_value(value): if issubclass(type(value), date): return "datetime'%s'" % value if isinstance(value, bool): return str(value).lower() return "'%s'" % value if 'raw_filter' in kwargs: filters.append(kwargs['raw_filter']) for k, v in kwargs.items(): if k not in ['orderby', 'format', 'headers', 'page', 'limit', 'templatename', 'timeout', 'raw_filter']: operator = 'eq' for op in ['lt', 'gt']: if k.endswith('__%s' % op): k = k[:-4] operator = op if not isinstance(v, (list, tuple)): v = [v] filters.append(' or '.join("%s %s %s" % (k, operator, build_value(v_)) for v_ in v)) if filters: request_kwargs['params']['$filter'] = ' and '.join('(%s)' % f for f in filters) if 'orderby' in kwargs: request_kwargs['params']['$orderby'] = kwargs['orderby'] page_size = DEFAULT_PAGE_SIZE if 'limit' in kwargs: page_size = int(kwargs['limit']) request_kwargs['params']['$top'] = page_size if 'page' in kwargs: request_kwargs['params']['$skip'] = (int(kwargs['page']) - 1) * page_size if 'format' in kwargs: request_kwargs['params']['format'] = kwargs['format'] if 'templatename' in kwargs: request_kwargs['params']['templatename'] = kwargs['templatename'] if method in ('PUT', 'POST'): request_kwargs['params']['returnBody'] = 'true' # Build body. if data is not None: request_kwargs['json'] = data return request_kwargs def __repr__(self): def print_method(name, args): return '%s(%s)' % (name, ', '.join(args)) formatstr = '%%%is - %%s' % max( len(print_method(k, v['kwargs'])) for k, v in self.method_details.items() ) return '%s%s:\n %s' % (self.name, self.__class__.__name__, '\n '.join( formatstr % ( print_method(k, v['kwargs']), v['hint'], ) for k, v in sorted(self.method_details.items()) ))
ABASystems/pymyob
myob/managers.py
Python
bsd-3-clause
8,057
[ "TINKER" ]
0d87b228f07a0e1886527ac3fc5c4bba90c2221f24f74ec1dd81cfd71f026733
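The Manager above turns keyword arguments into MYOB's OData-style query parameters ($filter, $orderby, $top, $skip) inside build_request_kwargs. The standalone sketch below isolates just the filter-building idea; the function name and the sample filter are illustrative and not part of pymyob's API.

from datetime import date

def build_filter(**kwargs):
    # Values for one key are OR-ed together, keys are AND-ed, and a
    # '__lt' / '__gt' suffix switches the comparison operator, mirroring
    # Manager.build_request_kwargs above.
    def fmt(value):
        if issubclass(type(value), date):
            return "datetime'%s'" % value
        if isinstance(value, bool):
            return str(value).lower()
        return "'%s'" % value

    clauses = []
    for key, value in kwargs.items():
        operator = 'eq'
        for op in ('lt', 'gt'):
            if key.endswith('__%s' % op):
                key, operator = key[:-4], op
        values = value if isinstance(value, (list, tuple)) else [value]
        clauses.append(' or '.join('%s %s %s' % (key, operator, fmt(v)) for v in values))
    return ' and '.join('(%s)' % c for c in clauses)

# build_filter(Status='Open', Amount__gt=100)
# -> "(Status eq 'Open') and (Amount gt '100')"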
""" The common code for a gradient editor for `tvtk.LookupTables` and `tvtk.VolumeProperty` color transfer functions. Most of the code is independent of tvtk however. The toolkit specific code is in toolkit specific files. This code is distributed under the conditions of the BSD license. This code was originally written by Gerald Knizia <cgk.d@gmx.net> and later modified by Prabhu Ramachandran for tvtk and MayaVi2. Copyright (c) 2005-2013, Gerald Knizia and Prabhu Ramachandran """ from os.path import splitext from tvtk.api import tvtk ########################################################################## # Utility functions. ########################################################################## def lerp(arg0,arg1,f): """linearly interpolate between arguments arg0 and arg1. The weight f is from [0..1], with f=0 giving arg0 and f=1 giving arg1""" return (1-f)*arg0 + f*arg1 def rgba_to_hsva(r,g,b,a): """Convert color from RGBA to HSVA. input: r,g,b,a are from [0..1] output: h,s,v,a are from [0..1] (h will never be 1.0) See http://en.wikipedia.org/wiki/HSV_color_space Only difference: hue range is [0..1) here, not [0..360).""" max_comp = max((r,g,b)) min_comp = min((r,g,b)) h = 1.0/6.0 #60.0 if ( max_comp != min_comp ): if ( r >= g) and ( r >= b ): h *= 0 + (g-b)/(max_comp-min_comp) elif ( g >= b ): h *= 2 + (b-r)/(max_comp-min_comp) else: h *= 4 + (r-g)/(max_comp-min_comp) if h < 0: h += 1.0 if h > 1.0: h -= 1.0 if ( max_comp != 0 ): s = ( max_comp - min_comp )/max_comp else: s = 0 v = max_comp return (h,s,v,a) def hsva_to_rgba(h_,s,v,a): """Convert color from HSVA to RGBA. input: h,s,v,a are from [0..1] output: r,g,b,a are from [0..1] See http://en.wikipedia.org/wiki/HSV_color_space Only difference: hue range is [0..1) here, not [0..360).""" (r,g,b,a) = (v,v,v,a) h = h_ * 360.0 if ( s < 1e-4 ): return (r,g,b,a)#zero saturation -> color acromatic hue_slice_index = int(h/60.0) hue_partial = h/60.0 - hue_slice_index p = v * ( 1 - s ) q = v * ( 1 - hue_partial * s ) t = v * ( 1 - (1-hue_partial) * s ) if ( 0 == hue_slice_index ): r, g, b = v, t, p elif ( 1 == hue_slice_index ): r, g, b = q, v, p elif ( 2 == hue_slice_index ): r, g, b = p, v, t elif ( 3 == hue_slice_index ): r, g, b = p, q, v elif ( 4 == hue_slice_index ): r, g, b = t, p, v elif ( 5 == hue_slice_index ): r, g, b = v, p, q return (r,g,b,a) ########################################################################## # `Color` class. ########################################################################## class Color: """Represents a color and provides means of automatic conversion between HSV(A) and RGB(A) color spaces. The color is stored in HSVA space.""" def __init__(self): self.hsva = (0.0, 0.0, 0.5, 1.0) def set_rgb(self,r,g,b): self.set_rgba(r,g,b,1.0) def set_rgba(self,r,g,b,a): self.hsva = rgba_to_hsva(r,g,b,a) def get_rgb255(self): """returns a tuple (r,g,b) of 3 integers in range [0..255] representing the color.""" rgba = self.get_rgba() return (int(rgba[0]*255), int(rgba[1]*255), int(rgba[2]*255) ) def get_rgba(self): h,s,v,a = self.hsva return hsva_to_rgba(h,s,v,a) def get_hsva(self): return self.hsva def set_hsva(self,h,s,v,a): self.hsva = (h,s,v,a) def set_lerp(self, f,A,B): """Set self to result of linear interpolation between colors A and B in HSVA space. 
The weight f is from [0..1], with f=0 giving A and f=1 giving color B.""" h = lerp(A.hsva[0], B.hsva[0], f) s = lerp(A.hsva[1], B.hsva[1], f) v = lerp(A.hsva[2], B.hsva[2], f) a = lerp(A.hsva[3], B.hsva[3], f) self.hsva = (h,s,v,a) ########################################################################## # `ColorControlPoint` class. ########################################################################## class ColorControlPoint: """A control point represents a fixed position in the gradient and its assigned color. A control point can have indifferent color channels in hsv space, i.e. channels, on which its presence does not impose any effect.""" def __init__(self, active_channels, fixed=False): self.color = Color() # position in the gradient table. range: [0..1]. self.pos = 0.0 # fixed control points can not be moved to other positions. The # control points for the begin and the end of the gradient are usually # the only fixed control points. self.fixed = fixed if ( 'a' != active_channels ): self.active_channels = "rgb" self.activate_channels(active_channels) else: self.active_channels = "a" def activate_channels(self,new_channels): """NewChannels: string consisting of the new color channel names""" for c in new_channels: if ( not ( c in self.active_channels ) ): self.active_channels += c def set_pos(self,f): self.pos = max(min(f,1.0), 0.0) ########################################################################## # `GradientTableOld` class. ########################################################################## class GradientTableOld: """this class represents a logical gradient table, i.e. an array of colors and the means to control it via control points""" def __init__( self, num_entries ): self.size = num_entries self.table = [[0.0]*self.size, [0.0]*self.size, [0.0]*self.size, [0.0]*self.size] self.table_hsva = [[0.0]*self.size, [0.0]*self.size, [0.0]*self.size, [0.0]*self.size] # ^- table[channel][index]: rgba values of the colors of the table. # range: [0..1]^4. # insert the control points for the left and the right end of the # gradient. These are fixed (i.e. cannot be moved or deleted) and # allow one to set begin and end colors. left_control_point = ColorControlPoint(fixed=True, active_channels="hsva") left_control_point.set_pos(0.0) left_control_point.color.set_rgb(0.0, 0.0, 0.0) right_control_point = ColorControlPoint(fixed=True, active_channels="hsva") right_control_point.set_pos(1.0) right_control_point.color.set_rgb(1.0, 1.0, 1.0) self.control_points = [left_control_point, right_control_point] # note: The array of control points always has to be sorted by gradient # position of the control points. # insert another control point. This one has no real function, it # is just there to make the gradient editor more colorful initially # and suggest to the (first time user) that it is actually possible to # place more control points. mid_control_point = ColorControlPoint(active_channels="hsv") mid_control_point.set_pos(0.4) mid_control_point.color.set_rgb(1.0,0.4,0.0) self.insert_control_point( mid_control_point ) # it is possible to scale the output gradient using a nonlinear function # which maps [0..1] to [0..1], aviable using the "nonlin" option in the # gui. Per default, this option is disabled however. self.scaling_function_string = "" # will receive the function string if # set, e.g. "x**(4*a)" self.scaling_function_parameter = 0.5 # the parameter a, slider controlled self.scaling_function = None # the actual function object. takes one # position parameter. 
None if disabled. self.update() def get_color_hsva(self,idx): """return (h,s,v,a) tuple in self.table_hsva for index idx""" return (self.table_hsva[0][idx],self.table_hsva[1][idx], self.table_hsva[2][idx],self.table_hsva[3][idx]) def get_color(self,idx): """return (r,g,b,a) tuple in self.table for index idx""" return (self.table[0][idx],self.table[1][idx], self.table[2][idx],self.table[3][idx]) def set_color_hsva(self,idx,hsva_color): """set hsva table entry for index idx to hsva_color, which must be (h,s,v,a)""" self.table_hsva[0][idx] = hsva_color[0] self.table_hsva[1][idx] = hsva_color[1] self.table_hsva[2][idx] = hsva_color[2] self.table_hsva[3][idx] = hsva_color[3] def set_color(self,idx,rgba_color): """set rgba table entry for index idx to rgba_color, which must be (r,g,b,a)""" self.table[0][idx] = rgba_color[0] self.table[1][idx] = rgba_color[1] self.table[2][idx] = rgba_color[2] self.table[3][idx] = rgba_color[3] def get_pos_index(self,f): """return index in .table of gradient position f \in [0..1]""" return int(f*(self.size-1)) def get_index_pos(self,idx): """return position f \in [0..1] of gradient table index idx""" return (1.0*idx)/(self.size-1) def get_pos_color(self,f): """return a Color object representing the color which is lies at position f \in [0..1] in the current gradient""" result = Color() #e = self.table_hsva[:,self.get_pos_index(f)] e = self.get_color_hsva(self.get_pos_index(f)) result.set_hsva(e[0], e[1], e[2], e[3]) return result def get_pos_rgba_color_lerped(self,f): """return a (r,g,b,a) color representing the color which is lies at position f \in [0..1] in the current gradient. if f is outside the [0..1] interval, the result will be clamped to this interval""" scaled_pos = max(min(f,1.0), 0.0)*(self.size-1) idx0 = int(scaled_pos) fraction = scaled_pos - idx0 idx1 = min( self.size - 1, 1 + idx0 ) r = lerp( self.table[0][idx0], self.table[0][idx1], fraction ) g = lerp( self.table[1][idx0], self.table[1][idx1], fraction ) b = lerp( self.table[2][idx0], self.table[2][idx1], fraction ) a = lerp( self.table[3][idx0], self.table[3][idx1], fraction ) return (r,g,b,a) def insert_control_point(self,new_point): """Insert a new control point into the table. Does sort the control points, but does NOT update the table.""" self.control_points += [new_point] self.sort_control_points() def sort_control_points(self): """Sort control points by position. Call this if the position of any control point was changed externally. The control point array always has to be sorted.""" def pred(x, y): if x < y: return -1 elif y < x: return +1 else: return 0 self.control_points.sort( lambda x, y: pred(x.pos, y.pos) ) def update(self): """Recalculate the gradient table from the control points. The colors are interpolated linearly between each two control points in hsva space. """ #self.Sortcontrol_points() control_point_indices_total = [] for point in self.control_points: control_point_indices_total.append((self.get_pos_index(point.pos),point)) # first, recalculate the Hsva table channel-wise from the control points for it in [("h",0),("s",1),("v",2),("a",3)]: # take into account only control points which are active # for the current channel control_point_indices = [ x for x in control_point_indices_total \ if it[0] in x[1].active_channels ] assert( len( control_point_indices ) >= 2 ) # we always interpolate between two adjacent control points on the # current channel. NextIntervalBeginIdx marks the first table index # on which the next set of control points is to be choosen. 
start_point_id = -1 end_point_id = 0 start_pos = 0 #dummy value end_pos = 0 #dummy value next_interval_begin_idx = 0 end_point = control_point_indices[0][1] assert( next_interval_begin_idx == 0 ) for k in range(self.size): while( k == next_interval_begin_idx ): # ^-- this loop makes sure that we won't attempt to # interpolate between two control points that lie on # each other. read "if" instead of "while". start_point_id += 1 end_point_id += 1 start_point = end_point start_pos = end_pos end_point = control_point_indices[end_point_id][1] end_pos = end_point.pos next_interval_begin_idx = 1+control_point_indices[end_point_id][0] # calculate float position of this entry in the gradient table # and (linear) position in the current gradient between the # two current control points cur_pos = self.get_index_pos(k) f = ( cur_pos - start_pos ) / ( end_pos - start_pos ) assert( ( 0 <= f ) and ( f <= 1 ) ) # ^-- this might happen when two control points lie on each # other. Since this case only occurs as an intermediate case # when dragging it is not really problematic. #f = min( 1.0, max( 0.0, f ) ) self.table_hsva[it[1]][k] = lerp(start_point.color.hsva[it[1]], end_point.color.hsva[it[1]], f) assert( next_interval_begin_idx == self.size ) # convert hsva colors to rgba for k in range(self.size): h,s,v,a = self.get_color_hsva(k) self.set_color(k, hsva_to_rgba(h, s, v, a)) def store_to_vtk_lookup_table(self, vtk_table, num_entries=256): """Store current color table in `vtk_table`, an instance of `tvtk.LookupTable`. """ vtk_table.number_of_table_values = num_entries scale_xform = lambda x:x if self.scaling_function: scale_xform = self.scaling_function for idx in range(num_entries): f = scale_xform(float(idx)/(num_entries-1)) rgba = self.get_pos_rgba_color_lerped(f) vtk_table.set_table_value( idx, rgba ) def store_to_vtk_volume_prop(self, volume_prop, scalar_range): """Given a `tvtk.VolumeProperty` and a scalar range to map values into, this sets the CTF based on the current control points. """ # FIXME: This method does not support scaling! ctf = volume_prop.rgb_transfer_function ctf.remove_all_points() otf = volume_prop.get_scalar_opacity() otf.remove_all_points() s1, s2 = scalar_range size = s2 - s1 for point in self.control_points: x = s1 + point.pos*size h, s, v, a = point.color.get_hsva() if point.active_channels != 'a': ctf.add_hsv_point(x, h, s, v) if 'a' in point.active_channels: otf.add_point(x, a) def load_from_vtk_volume_prop(self, volume_prop): """Given a vtkVolumeProperty, this initializes the control points of the gradient table. This works best when a ctf.ColorTransferFunction and PiecewiseFunction are used. Note that it is not as easy to setup the control points from a LUT because the LUT may end up having more than the size of the table editor here. It also usually does not make sense to do this with a LUT. """ # FIXME: This method does not support scaling! ctf = volume_prop.rgb_transfer_function otf = volume_prop.get_scalar_opacity() # We need a CTF with at least 2 points. size = ctf.size assert (size > 1) assert (otf.size > 1) s1, s2 = ctf.range scale = float(s2 - s1) ds = scale/(size -1) new_ctl_pts = [] has_nodes = False if hasattr(ctf, 'nodes'): has_nodes = True for i in range(size): if has_nodes: x = ctf.nodes[i] else: x = s1 + i*ds r, g, b = ctf.get_color(x) a = otf.get_value(x) if (i == 0) or (i == (size-1)): # First and last points are fixed. 
pt = ColorControlPoint(active_channels="hsva", fixed=True) else: pt = ColorControlPoint(active_channels="hsv", fixed=False) pt.color.set_rgba(r, g, b, a) pos = (x - s1)/scale pt.set_pos(pos) new_ctl_pts.append(pt) # The alpha values are indipendent of the hsv ones. size = otf.size ds = scale/(size -1) has_nodes = False if hasattr(ctf, 'nodes'): has_nodes = True for i in range(1, size-1): if has_nodes: x = otf.nodes[i] else: x = s1 + i*ds a = otf.get_value(x) r, g, b = ctf.get_color(x) pt = ColorControlPoint(active_channels="a", fixed=False) pt.color.set_rgba(r, g, b, a) pos = (x - s1)/scale pt.set_pos(pos) new_ctl_pts.append(pt) self.control_points = new_ctl_pts self.sort_control_points() self.update() def scaling_parameters_changed(self): """Recompile the scaling function.""" from math import tan, atan, cos, acos, sin, asin, pow, log, exp, e, pi self.scaling_function = None # let python generate a new function via the exec statement. to make # the security risk calculable, we execute that function in a local # scope. The downside is that we have to provide math functions # one at a time. def_string = "def ParamFn(x): return %s " % (self.scaling_function_string) dict = {"a":self.scaling_function_parameter, "ParamFn":None, "atan":atan, "tan":tan, "cos":cos, "acos":acos, "sin":sin, "asin":asin, "pow":pow, "log":log, "exp":exp, "e":e, "pi":pi } if ( "" == self.scaling_function_string ): return try: exec(def_string, dict) self.scaling_function = dict["ParamFn"] except: raise ValueError("failed to compile function: ", def_string ) def set_scaling_function_parameter(self,new_parameter): """Set the 'a' parameter of the scaling function""" self.scaling_function_parameter = new_parameter self.scaling_parameters_changed() def set_scaling_function(self,new_function_string): """Set scaling function. new_function_string is a string describing the function, e.g. 'x**(4*a)' """ self.scaling_function_string = new_function_string self.scaling_parameters_changed() def save(self, file_name): """Save control point set into a new file FileName. It is not checked whether the file already exists. Further writes out a VTK .lut file and a .jpg file showing the gradients.""" # Ensure that if the input file name had one of the extensions # we'll be writing out ourselves, it gets stripped out first. path_base,ext = splitext(file_name) #print(file_name) if ext.lower() in ['.lut','.jpg','.jpeg','.grad']: ext = '' file_name = path_base + ext # Create the three names for the files we'll be actually # writing out. file_name_grad = file_name + '.grad' file_name_lut = file_name + '.lut' file_name_jpg = file_name + '.jpg' # write control points set. file = open( file_name_grad, "w" ) file.write( "V 2.0 Color Gradient File\n" ) file.write( "ScalingFunction: %s\n" % (self.scaling_function_string) ) file.write( "ScalingParameter: %s\n" % (self.scaling_function_parameter) ) file.write( "ControlPoints: (pos fixed bindings h s v a)\n" ) for control_point in self.control_points: file.write( " %s %s %s %s %s %s %s\n" % ( \ control_point.pos, control_point.fixed, control_point.active_channels, control_point.color.get_hsva()[0], control_point.color.get_hsva()[1], control_point.color.get_hsva()[2], control_point.color.get_hsva()[3] ) ) file.close() # write vtk lookup table. 
Unfortunatelly these objects don't seem to # have any built in and exposed means of loading or saving them, so # we build the vtk file directly vtk_table = tvtk.LookupTable() self.store_to_vtk_lookup_table(vtk_table) file = open( file_name_lut, "w" ) num_colors = vtk_table.number_of_table_values file.write( "LOOKUP_TABLE UnnamedTable %s\n" % ( num_colors ) ) for idx in range(num_colors): entry = vtk_table.get_table_value(idx) file.write("%.4f %.4f %.4f %.4f\n" % (entry[0],entry[1],entry[2],entry[3])) file.close() # if the python image library is aviable, also generate a small .jpg # file showing how the gradient looks. Based on code from Arnd Baecker. try: import Image except ImportError: pass # we're ready otherwise. no jpg output tho. else: Ny=64 # vertical size of the jpeg im = Image.new("RGBA",(num_colors,Ny)) for nx in range(num_colors): (r,g,b,a) = vtk_table.get_table_value(nx) for ny in range(Ny): im.putpixel((nx,ny),(int(255*r),int(255*g),int(255*b), int(255*a))) im.save(file_name_jpg,"JPEG") # it might be better to store the gradient as .png file, as these # are actually able to store alpha components (unlike jpg files) # and might also lead to a better compression. def load(self, file_name): """Load control point set from file FileName and recalculate gradient table.""" file = open( file_name, "r" ) version_tag = file.readline() version = float(version_tag.split()[1])+1e-5 if ( version >= 1.1 ): # read in the scaling function and the scaling function parameter function_line_split = file.readline().split() parameter_line = file.readline() if ( len(function_line_split)==2 ): self.scaling_function_string = function_line_split[1] else: self.scaling_function_string = "" self.scaling_function_parameter = float(parameter_line.split()[1]) else: self.scaling_function_string = "" self.scaling_function_parameter = 0.5 file.readline() new_control_points = [] while True: cur_line = file.readline() if len(cur_line) == 0: # readline is supposed to return an empty string at EOF break args = cur_line.split() if ( len(args) < 7 ): msg = "gradient file format broken at line:\n" msg += cur_line raise ValueError(msg) new_point = ColorControlPoint(active_channels="") new_point.set_pos( float( args[0] ) ) new_point.fixed = "True" == args[1] #bool( args[1] ) new_point.active_channels = args[2] (h,s,v,a) = ( float(args[3]), float(args[4]), float(args[5]), float(args[6]) ) new_point.color.set_hsva(h,s,v,a) new_control_points.append(new_point) file.close() self.control_points = new_control_points self.sort_control_points() self.scaling_parameters_changed() self.update() ########################################################################## # `GradientTable` class. ########################################################################## class GradientTable: """this class represents a logical gradient table, i.e. an array of colors and the means to control it via control points This class (unlike the GradientTableOld) does not support scaling and uses VTK's ColorTransferFunction and PiecewiseFunction to perform the actual interpolation. """ def __init__( self, num_entries ): self.size = num_entries self.table = tvtk.ColorTransferFunction() try: self.table.range = (0.0, 1.0) except Exception: # VTK versions < 5.2 don't seem to need this. pass self.alpha = tvtk.PiecewiseFunction() # These VTK classes perform the interpolation for us. # insert the control points for the left and the right end of the # gradient. These are fixed (i.e. cannot be moved or deleted) and # allow one to set begin and end colors. 
left_control_point = ColorControlPoint(fixed=True, active_channels="hsva") left_control_point.set_pos(0.0) left_control_point.color.set_rgb(0.0, 0.0, 0.0) right_control_point = ColorControlPoint(fixed=True, active_channels="hsva") right_control_point.set_pos(1.0) right_control_point.color.set_rgb(1.0, 1.0, 1.0) self.control_points = [left_control_point, right_control_point] # note: The array of control points always has to be sorted by gradient # position of the control points. # insert another control point. This one has no real function, it # is just there to make the gradient editor more colorful initially # and suggest to the (first time user) that it is actually possible to # place more control points. mid_control_point = ColorControlPoint(active_channels="hsv") mid_control_point.set_pos(0.4) mid_control_point.color.set_rgb(1.0,0.4,0.0) self.insert_control_point( mid_control_point ) # These variables are only for compatibility with GradientTableOld. self.scaling_function_string = "" # will receive the function string if # set, e.g. "x**(4*a)" self.scaling_function_parameter = 0.5 # the parameter a, slider controlled self.scaling_function = None # the actual function object. takes one # position parameter. None if disabled. self.update() def get_color_hsva(self, f): """return (h,s,v,a) tuple in self.table_hsva for fraction f in [0,1].""" r, g, b = self.table.get_color(f) a = self.alpha.get_value(f) return rgba_to_hsva(r, g, b, a) def get_color(self, f): """return (r,g,b,a) tuple in self.table for fraction f in [0,1].""" r, g, b = self.table.get_color(f) a = self.alpha.get_value(f) return r, g, b, a def get_pos_color(self,f): """return a Color object representing the color which is lies at position f \in [0..1] in the current gradient""" result = Color() e = self.get_color_hsva(f) result.set_hsva(*e) return result def get_pos_rgba_color_lerped(self,f): """return a (r,g,b,a) color representing the color which is lies at position f \in [0..1] in the current gradient. if f is outside the [0..1] interval, the result will be clamped to this interval.""" return self.get_color(f) def insert_control_point(self,new_point): """Insert a new control point into the table. Does sort the control points, but does NOT update the table.""" self.control_points += [new_point] self.sort_control_points() def sort_control_points(self): """Sort control points by position. Call this if the position of any control point was changed externally. The control point array always has to be sorted.""" def pred(x, y): if x < y: return -1 elif y < x: return +1 else: return 0 self.control_points.sort( lambda x, y: pred(x.pos, y.pos) ) def update(self): """Recalculate the gradient table from the control points. The colors are interpolated linearly between each two control points in hsva space. """ #self.sort_control_points() table = self.table alpha = self.alpha table.remove_all_points() alpha.remove_all_points() for point in self.control_points: x = point.pos h, s, v, a = point.color.get_hsva() if point.active_channels != 'a': table.add_hsv_point(x, h, s, v) if 'a' in point.active_channels: alpha.add_point(x, a) def store_to_vtk_lookup_table(self, vtk_table, num_entries=256): """Store current color table in `vtk_table`, an instance of `tvtk.LookupTable`. 
""" vtk_table.number_of_table_values = num_entries for idx in range(num_entries): f = float(idx)/(num_entries-1) rgba = self.get_color(f) vtk_table.set_table_value( idx, rgba ) def store_to_vtk_volume_prop(self, volume_prop, scalar_range): """Given a `tvtk.VolumeProperty` and a scalar range to map values into, this sets the CTF based on the current control points. """ # FIXME: This method does not support scaling! ctf = volume_prop.rgb_transfer_function ctf.remove_all_points() otf = volume_prop.get_scalar_opacity() otf.remove_all_points() s1, s2 = scalar_range try: ctf.range = s1, s2 except Exception: # VTK versions < 5.2 don't seem to need this. pass size = s2 - s1 for point in self.control_points: x = s1 + point.pos*size h, s, v, a = point.color.get_hsva() if point.active_channels != 'a': ctf.add_hsv_point(x, h, s, v) if 'a' in point.active_channels: otf.add_point(x, a) def load_from_vtk_volume_prop(self, volume_prop): """Given a vtkVolumeProperty, this initializes the control points of the gradient table. This works best when a ctf.ColorTransferFunction and PiecewiseFunction are used. Note that it is not as easy to setup the control points from a LUT because the LUT may end up having more than the size of the table editor here. It also usually does not make sense to do this with a LUT. """ # FIXME: This method does not support scaling! ctf = volume_prop.rgb_transfer_function otf = volume_prop.get_scalar_opacity() # We need a CTF with at least 2 points. size = ctf.size assert (size > 1) assert (otf.size > 1) s1, s2 = ctf.range scale = float(s2 - s1) ds = scale/(size -1) new_ctl_pts = [] has_nodes = False if hasattr(ctf, 'nodes'): has_nodes = True for i in range(size): if has_nodes: x = ctf.nodes[i] else: x = s1 + i*ds r, g, b = ctf.get_color(x) a = otf.get_value(x) if (i == 0) or (i == (size-1)): # First and last points are fixed. pt = ColorControlPoint(active_channels="hsva", fixed=True) else: pt = ColorControlPoint(active_channels="hsv", fixed=False) pt.color.set_rgba(r, g, b, a) pos = (x - s1)/scale pt.set_pos(pos) new_ctl_pts.append(pt) # The alpha values are indipendent of the hsv ones. size = otf.size ds = scale/(size -1) has_nodes = False if hasattr(ctf, 'nodes'): has_nodes = True for i in range(1, size-1): if has_nodes: x = otf.nodes[i] else: x = s1 + i*ds a = otf.get_value(x) r, g, b = ctf.get_color(x) pt = ColorControlPoint(active_channels="a", fixed=False) pt.color.set_rgba(r, g, b, a) pos = (x - s1)/scale pt.set_pos(pos) new_ctl_pts.append(pt) self.control_points = new_ctl_pts self.sort_control_points() self.update() def scaling_parameters_changed(self): """Recompile the scaling function.""" raise NotImplementedError def set_scaling_function_parameter(self,new_parameter): """Set the 'a' parameter of the scaling function""" raise NotImplementedError def set_scaling_function(self,new_function_string): """Set scaling function. new_function_string is a string describing the function, e.g. 'x**(4*a)' """ raise NotImplementedError def save(self, file_name): """Save control point set into a new file FileName. It is not checked whether the file already exists. Further writes out a VTK .lut file and a .jpg file showing the gradients.""" # Ensure that if the input file name had one of the extensions # we'll be writing out ourselves, it gets stripped out first. path_base,ext = splitext(file_name) #print(file_name) if ext.lower() in ['.lut','.jpg','.jpeg','.grad']: ext = '' file_name = path_base + ext # Create the three names for the files we'll be actually # writing out. 
file_name_grad = file_name + '.grad' file_name_lut = file_name + '.lut' file_name_jpg = file_name + '.jpg' # write control points set. file = open( file_name_grad, "w" ) file.write( "V 2.0 Color Gradient File\n" ) file.write( "ScalingFunction: %s\n" % (self.scaling_function_string) ) file.write( "ScalingParameter: %s\n" % (self.scaling_function_parameter) ) file.write( "ControlPoints: (pos fixed bindings h s v a)\n" ) for control_point in self.control_points: file.write( " %s %s %s %s %s %s %s\n" % ( \ control_point.pos, control_point.fixed, control_point.active_channels, control_point.color.get_hsva()[0], control_point.color.get_hsva()[1], control_point.color.get_hsva()[2], control_point.color.get_hsva()[3] ) ) file.close() # write vtk lookup table. Unfortunatelly these objects don't seem to # have any built in and exposed means of loading or saving them, so # we build the vtk file directly vtk_table = tvtk.LookupTable() self.store_to_vtk_lookup_table(vtk_table) file = open( file_name_lut, "w" ) num_colors = vtk_table.number_of_table_values file.write( "LOOKUP_TABLE UnnamedTable %s\n" % ( num_colors ) ) for idx in range(num_colors): entry = vtk_table.get_table_value(idx) file.write("%.4f %.4f %.4f %.4f\n" % (entry[0],entry[1],entry[2],entry[3])) file.close() # if the python image library is aviable, also generate a small .jpg # file showing how the gradient looks. Based on code from Arnd Baecker. try: import Image except ImportError: pass # we're ready otherwise. no jpg output tho. else: Ny=64 # vertical size of the jpeg im = Image.new("RGBA",(num_colors,Ny)) for nx in range(num_colors): (r,g,b,a) = vtk_table.get_table_value(nx) for ny in range(Ny): im.putpixel((nx,ny),(int(255*r),int(255*g),int(255*b), int(255*a))) im.save(file_name_jpg,"JPEG") # it might be better to store the gradient as .png file, as these # are actually able to store alpha components (unlike jpg files) # and might also lead to a better compression. def load(self, file_name): """Load control point set from file FileName and recalculate gradient table.""" file = open( file_name, "r" ) version_tag = file.readline() version = float(version_tag.split()[1])+1e-5 if ( version >= 1.1 ): # read in the scaling function and the scaling function parameter function_line_split = file.readline().split() parameter_line = file.readline() if ( len(function_line_split)==2 ): self.scaling_function_string = function_line_split[1] else: self.scaling_function_string = "" self.scaling_function_parameter = float(parameter_line.split()[1]) else: self.scaling_function_string = "" self.scaling_function_parameter = 0.5 file.readline() new_control_points = [] while True: cur_line = file.readline() if len(cur_line) == 0: # readline is supposed to return an empty string at EOF break args = cur_line.split() if ( len(args) < 7 ): msg = "gradient file format broken at line:\n" msg += cur_line raise ValueError(msg) new_point = ColorControlPoint(active_channels="") new_point.set_pos( float( args[0] ) ) new_point.fixed = "True" == args[1] #bool( args[1] ) new_point.active_channels = args[2] (h,s,v,a) = ( float(args[3]), float(args[4]), float(args[5]), float(args[6]) ) new_point.color.set_hsva(h,s,v,a) new_control_points.append(new_point) file.close() self.control_points = new_control_points self.sort_control_points() #self.scaling_parameters_changed() self.update() ########################################################################## # `ChannelBase` class. 
########################################################################## class ChannelBase(object): def __init__(self, function_control, name, rgb_color, channel_index, channel_mode): """arguments documented in function body""" self.control = function_control #owning function control self.name = name #'r','g','b','h','s','v' or 'a' self.rgb_color = rgb_color # ^-- string containing a tk color value with which to # paint this channel self.index = channel_index #0: r or h, 1: g or s, 2: b or v, 3: a self.mode = channel_mode #'hsv' or 'rgb' def get_value(self, color): """Return height value of the current channel for the given color. Range: 0..1""" if ( self.mode == 'hsv' ): return color.get_hsva()[self.index] else: return color.get_rgba()[self.index] def get_value_index(self, color): """Return height index of channel value of Color. Range: [1..ControlHeight]""" return int( 1+(self.control.height-1)*(1.0 - self.get_value(color)) ) def get_index_value(self, y): """Get value in [0..1] of height index y""" return min(1.0, max(0.0, 1.0 - float(y)/(self.control.height-1))) def set_value( self, color, new_value_on_this_channel ): """Color will be modified: NewValue.. will be set to the color channel that ``*self`` represents.""" if ( self.mode == 'hsv' ): hsva = [color.get_hsva()[0], color.get_hsva()[1], color.get_hsva()[2], color.get_hsva()[3] ] hsva[self.index] = new_value_on_this_channel if ( hsva[0] >= 1.0 - 1e-5 ): # hack to make sure hue does not jump back to 0.0 # when it should be at 1.0 (rgb <-> hsv xform not # invertible there) hsva[0] = 1.0 - 1e-5 color.set_hsva(hsva[0],hsva[1],hsva[2],hsva[3]) else: rgba = [color.get_rgba()[0], color.get_rgba()[1], color.get_rgba()[2], color.get_rgba()[3] ] rgba[self.index] = new_value_on_this_channel color.set_rgba(rgba[0],rgba[1],rgba[2],rgba[3]) def set_value_index( self, color, y ): """Color will be modified: the value assigned to the height index y will be set to the color channel of Color ``*self`` represents.""" self.set_value( color, self.get_index_value(y) ) def get_pos_index(self,f): """Return x-index for gradient position f in [0..1]""" return int(f*(self.control.width-1)) def get_index_pos(self,idx): """Return gradient position f in [0..1] for x-index Idx in [0..ControlWidth-1]""" return (1.0*idx)/(self.control.width-1) def paint(self, painter): """Paint current channel into Canvas (a canvas of a function control object). This should be overridden to do the actual painting. """ raise NotImplementedError ########################################################################## # `FunctionControl` class. ########################################################################## class FunctionControl(object): """Widget which displays a rectangular regions on which hue, sat, val or rgb values can be modified. An function control can have one or more attached color channels.""" # Radius around a control point center in which we'd still count a # click as "clicked the control point" control_pt_click_tolerance = 4 ChannelFactory = ChannelBase def __init__(self, master, gradient_table, color_space, width, height): """Initialize a function control widget on tkframe master. Parameters: ----------- master: The master widget. Note that this widget *must* have the methods specified in the `AbstractGradientEditorWidget` interface. on_table_changed: Callback function taking a bool argument of meaning 'FinalUpdate'. 
FinalUpdate is true if a control point is dropped, created or removed and false if the update is due to a control point currently beeing dragged (but not yet dropped) color_space: String which specifies the channels painted on this control. May be any combination of h,s,v,r,g,b,a in which each channel occurs only once. set_status_text: a callback used to set the status text when using the editor. """ self.text_map = {'r': 'RED', 'g': 'GREEN', 'b': 'BLUE', 'h': 'HUE', 's': 'SATURATION', 'v': 'VALUE', 'a': 'ALPHA'} self.master = master self.table = gradient_table self.gradient_table = gradient_table self.width = width self.height = height self.channels = [] # add the channels Channel = self.ChannelFactory for c in color_space: if c == 'r': self.channels += [Channel(self, "r", (255,0,0), 0, 'rgb' )] elif c == 'g': self.channels += [Channel(self, "g", (0,255,0), 1, 'rgb' )] elif c == 'b': self.channels += [Channel(self, "b", (0,0,255), 2, 'rgb' )] elif c == 'h': self.channels += [Channel(self, "h", (255,0,0), 0, 'hsv' )] elif c == 's': self.channels += [Channel(self, "s", (0,255,0), 1, 'hsv' )] elif c == 'v': self.channels += [Channel(self, "v", (0,0,255), 2, 'hsv' )] elif c == 'a': self.channels += [Channel(self, "a", (0,0,0), 3, 'hsv' )] # generate a list of channels on which markers should # be bound if moved on the current channel. since we interpolate # the colors in hsv space, changing the r, g or b coordinates # explicitely means that h, s and v all have to be fixed. self.active_channels_string = "" for channel in self.channels: self.active_channels_string += channel.name if ( ( 'r' in color_space ) or ( 'g' in color_space ) or ( 'b' in color_space ) ): for c in "hsv": if ( not ( c in self.active_channels_string ) ): self.active_channels_string += c if ( color_space == 'a' ): # alpha channels actually independent of all other channels. self.active_channels_string = 'a' # need to set to "None" initially or event handlers get confused. self.cur_drag = None #<- [channel,control_point] while something is dragged. def find_control_point(self, x, y): """Check if a control point lies near (x,y) or near x if y is None. returns [channel, control point] if found, None otherwise""" for channel in self.channels: for control_point in self.table.control_points: # take into account only control points which are # actually active for the current channel if ( not ( channel.name in control_point.active_channels ) ): continue point_x = channel.get_pos_index( control_point.pos ) point_y = channel.get_value_index( control_point.color ) y_ = y if ( y_ is None ): y_ = point_y if ( (point_x-x)**2 + (point_y-y_)**2 <= self.control_pt_click_tolerance**2 ): return [channel, control_point] return None def table_config_changed(self, final_update): """Called internally in the control if the configuration of the attached gradient table has changed due to actions of this control. Forwards the update/change notice.""" self.table.update() self.master.on_gradient_table_changed(final_update) ###################################################################### # Toolkit specific event methods. # Look at wx_gradient_editor.py and qt_gradient_editor.py to see # the methods that are necessary. ###################################################################### ########################################################################## # `AbstractGradientEditor` interface. 
########################################################################## class AbstractGradientEditor(object): def on_gradient_table_changed(self, final_update): """ Update the gradient table and vtk lookuptable.""" raise NotImplementedError def set_status_text(self, msg): """Set the status on the status widget if you have one.""" raise NotImplementedError def get_table_range(self): """Return the CTF or LUT's scalar range.""" raise NotImplementedError ########################################################################## # `GradientEditorWidget` interface. ########################################################################## class GradientEditorWidget(AbstractGradientEditor): """A Gradient Editor widget that can be used anywhere. """ def __init__(self, master, vtk_table, on_change_color_table=None, colors=None): """ Parameters: ----------- vtk_table : the `tvtk.LookupTable` or `tvtk.VolumeProperty` object to set. on_change_color_table : A callback called when the color table changes. colors : list of 'rgb', 'hsv', 'h', 's', 'v', 'a' (Default : ['rgb', 'hsv', 'a']) 'rgb' creates one panel to edit Red, Green and Blue colors. 'hsv' creates one panel to edit Hue, Saturation and Value. 'h', 's', 'v', 'r', 'g', 'b', 'a' separately specified creates different panels for each. """ if colors is None: colors = ['rgb', 'hsv', 'a'] self.colors = colors self.gradient_preview_width = 300 self.gradient_preview_height = 50 self.channel_function_width = self.gradient_preview_width self.channel_function_height = 80 self.gradient_table = GradientTable(self.gradient_preview_width) self.vtk_color_table = vtk_table if isinstance(vtk_table, tvtk.LookupTable): self.vtk_table_is_lut = True else: # This is a tvtk.VolumeProperty self.vtk_table_is_lut = False # Initialize the editor with the volume property. self.gradient_table.load_from_vtk_volume_prop(vtk_table) self.on_change_color_table = on_change_color_table # Add the function controls: self.function_controls = [] self.tooltip_text = 'Left click: move control points\n'\ 'Right click: add/remove control points' editor_data = {'rgb': ('', 'RGB'), 'hsv': ('Hue: Red; Saturation: Green; '\ 'Value: Blue\n', 'HSV' ), 'h': ('', 'HUE'), 's': ('', 'SAT'), 'v': ('', 'VAL'), 'r': ('', 'RED'), 'g': ('', 'GREEN'), 'b': ('', 'BLUE'), 'a': ('', 'ALPHA'), } self.editor_data = editor_data ###################################################################### # `GradientEditorWidget` interface. ###################################################################### def set_status_text(self, msg): raise NotImplementedError def on_gradient_table_changed(self, final_update ): """ Update the gradient table and vtk lookuptable...""" # update all function controls. for control in self.function_controls: control.update() # repaint the gradient display or the external windows only # when the instant*** options are set or when the update was final. 
#if final_update or ( 1 == self.show_instant_gradients.get() ): if True: self.gradient_control.update() #if final_update or ( 1 == self.show_instant_feedback.get() ): if final_update: vtk_table = self.vtk_color_table if self.vtk_table_is_lut: self.gradient_table.store_to_vtk_lookup_table(vtk_table) else: rng = self.get_table_range() self.gradient_table.store_to_vtk_volume_prop(vtk_table, rng) cb = self.on_change_color_table if cb is not None: cb() def get_table_range(self): vtk_table = self.vtk_color_table if self.vtk_table_is_lut: return vtk_table.table_range else: return vtk_table.get_scalar_opacity().range def load(self, file_name): """Set the state of the color table using the given file. """ if len(file_name) == 0: return self.gradient_table.load(file_name) self.on_gradient_table_changed(final_update = True) def save(self, file_name): """Store the color table to the given file. This actually generates 3 files, a '.grad', a '.lut' file and a '.jpg' file. The .lut file can be used to setup a lookup table. The .grad file is used to set the state of the gradient table and the JPG file is an image of the how the lut will look. """ if len(file_name) == 0: return self.gradient_table.save(file_name)
dmsurti/mayavi
tvtk/util/gradient_editor.py
Python
bsd-3-clause
52,509
[ "VTK" ]
d3fa09b7ac813c672d4cd0e0a7c589038c052a733ca10dc02ec7ddad033095b5
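The gradient machinery above rests on two things: the rgba_to_hsva / hsva_to_rgba pair (hue kept in [0..1)) and channel-wise lerp between control-point colors in HSVA space. The short sketch below cross-checks that hue convention against the standard library's colorsys and reproduces the interpolation step; it is a minimal illustration, not a replacement for GradientTable.update.

import colorsys

def lerp(arg0, arg1, f):
    # f=0 gives arg0, f=1 gives arg1, as in the utility function above.
    return (1 - f) * arg0 + f * arg1

# colorsys also keeps hue in [0..1), so it can serve as an independent
# reference for the RGBA<->HSVA conversions defined above.
r, g, b = 0.2, 0.6, 0.9
h, s, v = colorsys.rgb_to_hsv(r, g, b)
back = colorsys.hsv_to_rgb(h, s, v)
assert all(abs(x - y) < 1e-6 for x, y in zip((r, g, b), back))

# Channel-wise interpolation of two HSVA control-point colors, the way
# GradientTable.update fills its table, then conversion back to RGBA.
start = (0.0, 1.0, 1.0, 1.0)         # opaque red
end = (2.0 / 3.0, 1.0, 1.0, 0.0)     # transparent blue
for k in range(5):
    f = k / 4.0
    hsva = [lerp(a, b, f) for a, b in zip(start, end)]
    rgba = colorsys.hsv_to_rgb(*hsva[:3]) + (hsva[3],)
    print(round(f, 2), [round(c, 4) for c in rgba])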
#!/usr/bin/env python # Visitor pattern example 2 # ----------------------- # Visitables # ----------------------- class StockItem(object): pass class Necessity(StockItem): pass class Milk(Necessity): name = 'milk' price = 0.99 class Bread(Necessity): name = 'bread' price = 0.6 class Tobacco(StockItem): name = 'tobacco' price = 10.0 class Liquor(StockItem): name = 'liquor' price = 20.0 # ------------------------------- # A couple of different Visitors. # ------------------------------- class Visitor(object): def visit(self, visitable): visitable_class_hierarchy = [t.__name__ for t in type(visitable).mro()] for visitable_class_name in visitable_class_hierarchy: handler_name = 'visit_' + visitable_class_name handler = getattr(self, handler_name, None) if handler is None: continue return handler(visitable) return self.fallback(visitable) def fallback(self, visitable): raise RuntimeError('{} of type {} is not supported'.format( repr(visitable), type(visitable))) class TaxVisitor(Visitor): """ calculates price after tax for each item type """ def visit_Necessity(self, necessity): name = necessity.name price = necessity.price print name, "price with tax:", price def visit_Tobacco(self, tobacco): name = tobacco.name price = tobacco.price print name, "price with tax:", price + price * 0.3 def visit_Liquor(self, liquor): name = liquor.name price = liquor.price print name, "price with tax:", price + price * 0.2 class TotalPriceVisitor(Visitor): """ Calculates total price. An example of a Visitor with state. """ def __init__(self): self._total = 0.0 def visit_StockItem(self, item): self._total += item.price def show(self): print "Total price (without tax):", self._total milk = Milk() bread = Bread() tobacco = Tobacco() liquor = Liquor() tax = TaxVisitor() tax.visit(milk) tax.visit(bread) tax.visit(tobacco) tax.visit(liquor) total = TotalPriceVisitor() total.visit(milk) total.visit(bread) total.visit(tobacco) total.visit(liquor) total.show() ## OUTPUT: # milk price with tax: 0.99 # bread price with tax: 0.6 # tobacco price with tax: 13.0 # liquor price with tax: 24.0 # Total price (without tax): 31.59
blazk/patterns
visitor/taxes2.py
Python
gpl-2.0
2,474
[ "VisIt" ]
e00235c934ac0609517ab7bb9cb2f83c635c11c4943a12a4f3fd8814199196f6
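The Visitor.visit dispatcher above walks the visitable's MRO looking for a visit_<ClassName> handler, which is what lets visit_Necessity catch both Milk and Bread. On Python 3.8+ the same hierarchy-aware double dispatch can be written with functools.singledispatchmethod; the sketch below is an alternative formulation, not part of the original example.

from functools import singledispatchmethod

class StockItem(object):
    price = 0.0

class Necessity(StockItem):
    pass

class Milk(Necessity):
    price = 0.99

class Tobacco(StockItem):
    price = 10.0

class TaxVisitor(object):
    @singledispatchmethod
    def visit(self, item):
        raise RuntimeError('%r of type %s is not supported' % (item, type(item)))

    @visit.register
    def _(self, item: Necessity):
        return item.price            # necessities carry no tax

    @visit.register
    def _(self, item: Tobacco):
        return item.price * 1.3      # 30% tax, as in TaxVisitor above

tax = TaxVisitor()
print(tax.visit(Milk()))     # 0.99 -- dispatched via the Necessity handler
print(tax.visit(Tobacco()))  # 13.0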
############################################################################## # MDTraj: A Python Library for Loading, Saving, and Manipulating # Molecular Dynamics Trajectories. # Copyright 2012-2014 Stanford University and the Authors # # Authors: Robert T. McGibbon # Contributors: # # MDTraj is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as # published by the Free Software Foundation, either version 2.1 # of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with MDTraj. If not, see <http://www.gnu.org/licenses/>. ############################################################################## import tempfile, os import numpy as np import mdtraj as md from mdtraj.formats import GroTrajectoryFile, gro from mdtraj.testing import get_fn, eq, DocStringFormatTester TestDocstrings = DocStringFormatTester(gro, error_on_none=True) fd, temp = tempfile.mkstemp(suffix='.gro') def teardown_module(module): """remove the temporary file created by tests in this file this gets automatically called by nose""" os.close(fd) os.unlink(temp) def test_read_write(): t = md.load(get_fn('4waters.pdb')) with GroTrajectoryFile(temp, 'w') as f: f.write(t.xyz, t.topology) with GroTrajectoryFile(temp) as f: xyz, time, unitcell = f.read() top = f.topology eq(xyz, t.xyz, decimal=3) eq(list(top.atoms), list(t.top.atoms)) def test_load(): tref = md.load(get_fn('4waters.pdb')) with GroTrajectoryFile(temp, 'w') as f: f.write(tref.xyz, tref.topology) t = md.load(temp) eq(t.xyz, tref.xyz, decimal=3) eq(list(t.top.atoms), list(tref.top.atoms))
kyleabeauchamp/mdtraj
mdtraj/tests/test_gro.py
Python
lgpl-2.1
2,047
[ "MDTraj" ]
16137771959a397c2707bb551505237fd62949e2483e12585e489ed2bce6d52f
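test_read_write above is the canonical GroTrajectoryFile round trip: write xyz plus topology, read them back, and compare with a loose tolerance because .gro stores coordinates with three decimals. The sketch below is the same pattern outside the test harness; it assumes mdtraj is installed and uses 'input.pdb' as a placeholder for any small structure file.

import os
import tempfile

import mdtraj as md
import numpy as np
from mdtraj.formats import GroTrajectoryFile

t = md.load('input.pdb')               # placeholder input structure

fd, path = tempfile.mkstemp(suffix='.gro')
try:
    with GroTrajectoryFile(path, 'w') as f:
        f.write(t.xyz, t.topology)
    with GroTrajectoryFile(path) as f:
        xyz, times, unitcell = f.read()
    # .gro keeps only three decimals (in nm), hence the loose tolerance.
    np.testing.assert_allclose(xyz, t.xyz, atol=1e-3)
finally:
    os.close(fd)
    os.unlink(path)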
# Copyright (C) 2012,2013 # Max Planck Institute for Polymer Research # Copyright (C) 2008,2009,2010,2011 # Max-Planck-Institute for Polymer Research & Fraunhofer SCAI # # This file is part of ESPResSo++. # # ESPResSo++ is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # ESPResSo++ is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. """ *************************************** **espresso.interaction.HarmonicUnique** *************************************** """ from espresso import pmi, infinity from espresso.esutil import * from espresso.interaction.PotentialUniqueDist import * from espresso.interaction.Interaction import * from _espresso import interaction_HarmonicUnique, \ interaction_FixedPairDistListHarmonicUnique class HarmonicUniqueLocal(PotentialUniqueDistLocal, interaction_HarmonicUnique): 'The (local) HarmonicUnique potential.' def __init__(self, K=1.0): """Initialize the local HarmonicUnique object.""" if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup(): cxxinit(self, interaction_HarmonicUnique, K) class FixedPairDistListHarmonicUniqueLocal(InteractionLocal, interaction_FixedPairDistListHarmonicUnique): 'The (local) HarmonicUnique interaction using FixedPair lists.' def __init__(self, system, fpl, potential): if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup(): cxxinit(self, interaction_FixedPairDistListHarmonicUnique, system, fpl, potential) def setPotential(self, potential): if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup(): self.cxxclass.setPotential(self, potential) def setFixedPairList(self, fixedpairlist): if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup(): self.cxxclass.setFixedPairList(self, fixedpairlist) def getFixedPairList(self): if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup(): return self.cxxclass.getFixedPairList(self) if pmi.isController: class HarmonicUnique(PotentialUniqueDist): 'The HarmonicUnique potential.' pmiproxydefs = dict( cls = 'espresso.interaction.HarmonicUniqueLocal', pmiproperty = ['K'] ) class FixedPairDistListHarmonicUnique(Interaction): __metaclass__ = pmi.Proxy pmiproxydefs = dict( cls = 'espresso.interaction.FixedPairDistListHarmonicUniqueLocal', pmicall = ['setPotential','setFixedPairList','getFixedPairList'] )
BackupTheBerlios/espressopp
src/interaction/HarmonicUnique.py
Python
gpl-3.0
3,263
[ "ESPResSo" ]
270b5659cae1c2db899c5b81ef816cf646c959fa12dffb2ca0d8d2db12055a48
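HarmonicUnique pairs a harmonic bond potential with a FixedPairDistList, so every bonded pair keeps its own reference distance instead of sharing a single one. The plain-Python sketch below only illustrates that idea; the U = K*(r - r0)**2 form and its prefactor convention are assumptions here, and the authoritative definition is the C++ interaction_HarmonicUnique wrapped above.

import numpy as np

def harmonic_unique_energy(r, r0, K=1.0):
    # Per-pair reference distance r0; K as in the `K` pmiproperty above.
    # Assumed form: U(r) = K * (r - r0)**2 (check the C++ source for the
    # exact prefactor convention).
    return K * (np.asarray(r) - np.asarray(r0)) ** 2

# Three bonded pairs, each with its own stored distance, as a
# FixedPairDistList would provide them.
r = np.array([1.02, 0.97, 1.10])
r0 = np.array([1.00, 1.00, 1.05])
print(harmonic_unique_energy(r, r0, K=30.0))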
# -*- coding: utf-8 -*- # Copyright 2007-2011 The HyperSpy developers # # This file is part of HyperSpy. # # HyperSpy is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # HyperSpy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with HyperSpy. If not, see <http://www.gnu.org/licenses/>. import numpy as np from hyperspy.component import Component from .gaussian import Gaussian sqrt2pi = np.sqrt(2 * np.pi) class SEE(Component): """Secondary electron emission component for Photoemission Spectroscopy Attributes ---------- A : float Phi : float B : float sigma : float Resolution parameter. """ def __init__(self, A=1., Phi=1., B=0., sigma=0): Component.__init__(self, ('A', 'Phi', 'B', 'sigma')) self.A.value, self.Phi.value, self.B.value, self.sigma.value = \ A, Phi, B, sigma self._position = self.Phi # Boundaries self.A.bmin = 0. self.A.bmax = None self.convolved = True # Gradients self.A.grad = self.grad_A self.Phi.grad = self.grad_Phi self.B.grad = self.grad_B self.sigma.grad = self.grad_sigma # Resolution functions self.gaussian = Gaussian() self.gaussian.origin.free, self.gaussian.A.free = False, False self.gaussian.sigma.free = True self.gaussian.A.value = 1. def __repr__(self): return u'SEE' def function(self, x): """ """ if self.sigma.value: self.gaussian.sigma.value = self.sigma.value self.gaussian.origin.value = (x[-1] + x[0]) / 2 return np.convolve( self.gaussian.function(x), np.where( x > self.Phi.value, self.A.value * ( x - self.Phi.value) / ( x - self.Phi.value + self.B.value) ** 4, 0), 'same') else: return np.where(x > self.Phi.value, self.A.value * (x - self.Phi.value) / (x - self.Phi.value + self.B.value) ** 4, 0) def grad_A(self, x): """ """ if self.sigma.value: self.gaussian.sigma.value = self.sigma.value self.gaussian.origin.value = (x[-1] + x[0]) / 2 return np.convolve( self.gaussian.function(x), np.where( x > self.Phi.value, (x - self.Phi.value) / (x - self.Phi.value + self.B.value) ** 4, 0), 'same') else: return np.where(x > self.Phi.value, (x - self.Phi.value) / (x - self.Phi.value + self.B.value) ** 4, 0) def grad_sigma(self, x): """ """ self.gaussian.sigma.value = self.sigma.value self.gaussian.origin.value = (x[-1] + x[0]) / 2 return np.convolve( self.gaussian.grad_sigma(x), np.where( x > self.Phi.value, self.A.value * (x - self.Phi.value) / (x - self.Phi.value + self.B.value) ** 4, 0), 'same') def grad_Phi(self, x): """ """ if self.sigma.value: self.gaussian.sigma.value = self.sigma.value self.gaussian.origin.value = (x[-1] + x[0]) / 2 return np.convolve( self.gaussian.function(x), np.where( x > self.Phi.value, (4 * (x - self.Phi.value) * self.A.value) / (self.B.value + x - self.Phi.value) ** 5 - self.A.value / (self.B.value + x - self.Phi.value) ** 4, 0), 'same') else: return np.where( x > self.Phi.value, (4 * (x - self.Phi.value) * self.A.value) / (self.B.value + x - self.Phi.value) ** 5 - self.A.value / (self.B.value + x - self.Phi.value) ** 4, 0) def grad_B(self, x): if self.sigma.value: self.gaussian.sigma.value = self.sigma.value self.gaussian.origin.value = (x[-1] + x[0]) / 2 return np.convolve( self.gaussian.function(x), np.where( 
x > self.Phi.value, -(4 * (x - self.Phi.value) * self.A.value) / (self.B.value + x - self.Phi.value) ** 5, 0), 'same') else: return np.where( x > self.Phi.value, -(4 * (x - self.Phi.value) * self.A.value) / (self.B.value + x - self.Phi.value) ** 5, 0)
sillvan/hyperspy
hyperspy/_components/pes_see.py
Python
gpl-3.0
5,288
[ "Gaussian" ]
464a5c37a223ae482763f87455b44a30a5321ce3e31c9cf459d30e316f14b5cf
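Above the Gaussian broadening, the SEE component is just A*(x - Phi)/(x - Phi + B)**4 for x > Phi and zero below the threshold, with grad_A/grad_Phi/grad_B supplying the analytic derivatives used during fitting. The NumPy-only sketch below evaluates the unbroadened shape and checks the grad_Phi expression against a finite difference, away from the kink at x = Phi; the parameter values are arbitrary.

import numpy as np

def see(x, A=1.0, Phi=1.0, B=0.5):
    # Unbroadened SEE line shape, as in SEE.function with sigma == 0.
    return np.where(x > Phi, A * (x - Phi) / (x - Phi + B) ** 4, 0.0)

A, Phi, B = 2.0, 1.2, 0.6
x = np.linspace(1.5, 5.0, 200)        # stay above Phi to avoid the kink

eps = 1e-6
numeric = (see(x, A, Phi + eps, B) - see(x, A, Phi - eps, B)) / (2 * eps)
# Same expression as SEE.grad_Phi above (without the convolution branch).
analytic = 4 * (x - Phi) * A / (B + x - Phi) ** 5 - A / (B + x - Phi) ** 4
print(np.allclose(numeric, analytic, atol=1e-6))   # True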
import pandas as pd import mdtraj as md from dipeptide_parameters import * reference = pd.read_csv("./experimental_data/baldwin_table1_populations.csv", index_col=0) data = [] for (ff, water, seq) in products: try: aa = seq.split("_")[1] t = md.load("./dcd/%s_%s_%s.dcd" % (ff, water, seq), top="./pdbs/%s.pdb" % (seq)) except: continue phi = md.compute_phi(t)[1][:, 0] * 180 / np.pi psi = md.compute_psi(t)[1][:, 0] * 180 / np.pi ass = assign(phi, psi) populations = pd.Series({"PPII":0.0, "beta":0.0, "alpha":0.0, "other":0.0}) populations += ass.value_counts(normalize=True) data.append([ff, water, aa, populations["PPII"], populations["beta"], populations["alpha"]]) data = pd.DataFrame(data, columns=["ff", "water", "aa", "PPII", "beta", "alpha"])
hainm/open-forcefield-group
nmr/ace_X_NME/code/analyze_populations.py
Python
gpl-2.0
813
[ "MDTraj" ]
ab5dca873c94d60b5c53f9098ca8d7cf875f97febe58c5514896aab1a9fe30d8
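The script above reduces each trajectory to backbone phi/psi angles, maps every frame to a Ramachandran basin with `assign` from dipeptide_parameters, and normalizes the counts into PPII/beta/alpha populations. The sketch below reproduces only that tabulation step on synthetic angles; toy_assign and its boundaries are made up for illustration and do not match the project's actual assignment rules.

import numpy as np
import pandas as pd

def toy_assign(phi, psi):
    # Crude stand-in for dipeptide_parameters.assign: label each
    # (phi, psi) pair with a Ramachandran basin name.
    labels = []
    for p, s in zip(phi, psi):
        if p < 0 and s > 50:
            labels.append('PPII' if p > -90 else 'beta')
        elif p < 0:
            labels.append('alpha')
        else:
            labels.append('other')
    return pd.Series(labels)

rng = np.random.default_rng(0)
phi = rng.uniform(-180, 180, size=1000)
psi = rng.uniform(-180, 180, size=1000)

populations = pd.Series({'PPII': 0.0, 'beta': 0.0, 'alpha': 0.0, 'other': 0.0})
# fill_value guards against basins that never occur in the sample.
populations = populations.add(toy_assign(phi, psi).value_counts(normalize=True), fill_value=0.0)
print(populations)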
import logging import time import traceback import warnings from collections.abc import Mapping import numpy as np from ..conventions import cf_encoder from ..core import indexing from ..core.pycompat import dask_array_type from ..core.utils import FrozenDict, NdimSizeLenMixin # Create a logger object, but don't add any handlers. Leave that to user code. logger = logging.getLogger(__name__) NONE_VAR_NAME = "__values__" def _encode_variable_name(name): if name is None: name = NONE_VAR_NAME return name def _decode_variable_name(name): if name == NONE_VAR_NAME: name = None return name def find_root_and_group(ds): """Find the root and group name of a netCDF4/h5netcdf dataset.""" hierarchy = () while ds.parent is not None: hierarchy = (ds.name,) + hierarchy ds = ds.parent group = "/" + "/".join(hierarchy) return ds, group def robust_getitem(array, key, catch=Exception, max_retries=6, initial_delay=500): """ Robustly index an array, using retry logic with exponential backoff if any of the errors ``catch`` are raised. The initial_delay is measured in ms. With the default settings, the maximum delay will be in the range of 32-64 seconds. """ assert max_retries >= 0 for n in range(max_retries + 1): try: return array[key] except catch: if n == max_retries: raise base_delay = initial_delay * 2 ** n next_delay = base_delay + np.random.randint(base_delay) msg = ( "getitem failed, waiting %s ms before trying again " "(%s tries remaining). Full traceback: %s" % (next_delay, max_retries - n, traceback.format_exc()) ) logger.debug(msg) time.sleep(1e-3 * next_delay) class BackendArray(NdimSizeLenMixin, indexing.ExplicitlyIndexed): __slots__ = () def __array__(self, dtype=None): key = indexing.BasicIndexer((slice(None),) * self.ndim) return np.asarray(self[key], dtype=dtype) class AbstractDataStore(Mapping): __slots__ = () def __iter__(self): return iter(self.variables) def __getitem__(self, key): return self.variables[key] def __len__(self): return len(self.variables) def get_dimensions(self): # pragma: no cover raise NotImplementedError() def get_attrs(self): # pragma: no cover raise NotImplementedError() def get_variables(self): # pragma: no cover raise NotImplementedError() def get_encoding(self): return {} def load(self): """ This loads the variables and attributes simultaneously. A centralized loading function makes it easier to create data stores that do automatic encoding/decoding. For example:: class SuffixAppendingDataStore(AbstractDataStore): def load(self): variables, attributes = AbstractDataStore.load(self) variables = {'%s_suffix' % k: v for k, v in variables.items()} attributes = {'%s_suffix' % k: v for k, v in attributes.items()} return variables, attributes This function will be called anytime variables or attributes are requested, so care should be taken to make sure its fast. 
""" variables = FrozenDict( (_decode_variable_name(k), v) for k, v in self.get_variables().items() ) attributes = FrozenDict(self.get_attrs()) return variables, attributes @property def variables(self): # pragma: no cover warnings.warn( "The ``variables`` property has been deprecated and " "will be removed in xarray v0.11.", FutureWarning, stacklevel=2, ) variables, _ = self.load() return variables @property def attrs(self): # pragma: no cover warnings.warn( "The ``attrs`` property has been deprecated and " "will be removed in xarray v0.11.", FutureWarning, stacklevel=2, ) _, attrs = self.load() return attrs @property def dimensions(self): # pragma: no cover warnings.warn( "The ``dimensions`` property has been deprecated and " "will be removed in xarray v0.11.", FutureWarning, stacklevel=2, ) return self.get_dimensions() def close(self): pass def __enter__(self): return self def __exit__(self, exception_type, exception_value, traceback): self.close() class ArrayWriter: __slots__ = ("sources", "targets", "regions", "lock") def __init__(self, lock=None): self.sources = [] self.targets = [] self.regions = [] self.lock = lock def add(self, source, target, region=None): if isinstance(source, dask_array_type): self.sources.append(source) self.targets.append(target) self.regions.append(region) else: if region: target[region] = source else: target[...] = source def sync(self, compute=True): if self.sources: import dask.array as da # TODO: consider wrapping targets with dask.delayed, if this makes # for any discernable difference in perforance, e.g., # targets = [dask.delayed(t) for t in self.targets] delayed_store = da.store( self.sources, self.targets, lock=self.lock, compute=compute, flush=True, regions=self.regions, ) self.sources = [] self.targets = [] self.regions = [] return delayed_store class AbstractWritableDataStore(AbstractDataStore): __slots__ = () def encode(self, variables, attributes): """ Encode the variables and attributes in this store Parameters ---------- variables : dict-like Dictionary of key/value (variable name / xr.Variable) pairs attributes : dict-like Dictionary of key/value (attribute name / attribute) pairs Returns ------- variables : dict-like attributes : dict-like """ variables = {k: self.encode_variable(v) for k, v in variables.items()} attributes = {k: self.encode_attribute(v) for k, v in attributes.items()} return variables, attributes def encode_variable(self, v): """encode one variable""" return v def encode_attribute(self, a): """encode one attribute""" return a def set_dimension(self, d, l): # pragma: no cover raise NotImplementedError() def set_attribute(self, k, v): # pragma: no cover raise NotImplementedError() def set_variable(self, k, v): # pragma: no cover raise NotImplementedError() def store_dataset(self, dataset): """ in stores, variables are all variables AND coordinates in xarray.Dataset variables are variables NOT coordinates, so here we pass the whole dataset in instead of doing dataset.variables """ self.store(dataset, dataset.attrs) def store( self, variables, attributes, check_encoding_set=frozenset(), writer=None, unlimited_dims=None, ): """ Top level method for putting data on this store, this method: - encodes variables/attributes - sets dimensions - sets variables Parameters ---------- variables : dict-like Dictionary of key/value (variable name / xr.Variable) pairs attributes : dict-like Dictionary of key/value (attribute name / attribute) pairs check_encoding_set : list-like List of variables that should be checked for invalid encoding 
values writer : ArrayWriter unlimited_dims : list-like List of dimension names that should be treated as unlimited dimensions. """ if writer is None: writer = ArrayWriter() variables, attributes = self.encode(variables, attributes) self.set_attributes(attributes) self.set_dimensions(variables, unlimited_dims=unlimited_dims) self.set_variables( variables, check_encoding_set, writer, unlimited_dims=unlimited_dims ) def set_attributes(self, attributes): """ This provides a centralized method to set the dataset attributes on the data store. Parameters ---------- attributes : dict-like Dictionary of key/value (attribute name / attribute) pairs """ for k, v in attributes.items(): self.set_attribute(k, v) def set_variables(self, variables, check_encoding_set, writer, unlimited_dims=None): """ This provides a centralized method to set the variables on the data store. Parameters ---------- variables : dict-like Dictionary of key/value (variable name / xr.Variable) pairs check_encoding_set : list-like List of variables that should be checked for invalid encoding values writer : ArrayWriter unlimited_dims : list-like List of dimension names that should be treated as unlimited dimensions. """ for vn, v in variables.items(): name = _encode_variable_name(vn) check = vn in check_encoding_set target, source = self.prepare_variable( name, v, check, unlimited_dims=unlimited_dims ) writer.add(source, target) def set_dimensions(self, variables, unlimited_dims=None): """ This provides a centralized method to set the dimensions on the data store. Parameters ---------- variables : dict-like Dictionary of key/value (variable name / xr.Variable) pairs unlimited_dims : list-like List of dimension names that should be treated as unlimited dimensions. """ if unlimited_dims is None: unlimited_dims = set() existing_dims = self.get_dimensions() dims = {} for v in unlimited_dims: # put unlimited_dims first dims[v] = None for v in variables.values(): dims.update(dict(zip(v.dims, v.shape))) for dim, length in dims.items(): if dim in existing_dims and length != existing_dims[dim]: raise ValueError( "Unable to update size for existing dimension" "%r (%d != %d)" % (dim, length, existing_dims[dim]) ) elif dim not in existing_dims: is_unlimited = dim in unlimited_dims self.set_dimension(dim, length, is_unlimited) class WritableCFDataStore(AbstractWritableDataStore): __slots__ = () def encode(self, variables, attributes): # All NetCDF files get CF encoded by default, without this attempting # to write times, for example, would fail. variables, attributes = cf_encoder(variables, attributes) variables = {k: self.encode_variable(v) for k, v in variables.items()} attributes = {k: self.encode_attribute(v) for k, v in attributes.items()} return variables, attributes
jhamman/xarray
xarray/backends/common.py
Python
apache-2.0
11,865
[ "NetCDF" ]
786f25f24117be72eac48ba2c252c26bb7a5c6828d5c1dcfed55e2b315c53b84
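robust_getitem in the backend module above retries a failing __getitem__ with exponential backoff. A minimal usage sketch, assuming xarray is importable and using a toy RemoteArray class (hypothetical, for illustration only) whose reads fail transiently:

import numpy as np
from xarray.backends.common import robust_getitem

class RemoteArray:
    """Toy array whose first two reads fail, mimicking a flaky remote store."""
    def __init__(self, data, failures=2):
        self.data = np.asarray(data)
        self.failures = failures

    def __getitem__(self, key):
        if self.failures > 0:
            self.failures -= 1
            raise IOError("transient read failure")
        return self.data[key]

arr = RemoteArray(np.arange(10))
# Sleeps roughly 0.5 s, then ~1 s, ... between attempts; re-raises only after max_retries.
values = robust_getitem(arr, slice(0, 5), catch=IOError, max_retries=6)
print(values)  # [0 1 2 3 4]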
from __future__ import absolute_import import unittest import math import json import os import numpy as np from pymatgen.analysis.elasticity.tensors import * from pymatgen.core.operations import SymmOp from pymatgen.symmetry.analyzer import SpacegroupAnalyzer from pymatgen.util.testing import PymatgenTest from pymatgen import Structure test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..", 'test_files') class TensorTest(PymatgenTest): def setUp(self): self.vec = Tensor([1., 0., 0.]) self.rand_rank2 = Tensor(np.random.randn(3,3)) self.rand_rank3 = Tensor(np.random.randn(3,3,3)) self.rand_rank4 = Tensor(np.random.randn(3,3,3,3)) a = 3.14 * 42.5 / 180 self.non_symm = SquareTensor([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6], [0.2, 0.5, 0.5]]) self.rotation = SquareTensor([[math.cos(a), 0, math.sin(a)], [0, 1, 0], [-math.sin(a), 0, math.cos(a)]]) self.low_val = Tensor([[1e-6, 1 + 1e-5, 1e-6], [1 + 1e-6, 1e-6, 1e-6], [1e-7, 1e-7, 1 + 1e-5]]) self.symm_rank2 = Tensor([[1, 2, 3], [2, 4, 5], [3, 5, 6]]) self.symm_rank3 = Tensor([[[1, 2, 3], [2, 4, 5], [3, 5, 6]], [[2, 4, 5], [4, 7, 8], [5, 8, 9]], [[3, 5, 6], [5, 8, 9], [6, 9, 10]]]) self.symm_rank4 = Tensor([[[[1.2, 0.4, -0.92], [0.4, 0.05, 0.11], [-0.92, 0.11, -0.02]], [[0.4, 0.05, 0.11], [0.05, -0.47, 0.09], [0.11, 0.09, -0.]], [[-0.92, 0.11, -0.02], [0.11, 0.09, 0.], [-0.02, 0., -0.3]]], [[[0.4, 0.05, 0.11], [0.05, -0.47, 0.09], [0.11, 0.09, 0.]], [[0.05, -0.47, 0.09], [-0.47, 0.17, 0.62], [0.09, 0.62, 0.3]], [[0.11, 0.09, 0.], [0.09, 0.62, 0.3], [0., 0.3, -0.18]]], [[[-0.92, 0.11, -0.02], [0.11, 0.09, 0.], [-0.02, 0, -0.3]], [[0.11, 0.09, 0.], [0.09, 0.62, 0.3], [0., 0.3, -0.18]], [[-0.02, 0., -0.3], [0., 0.3, -0.18], [-0.3, -0.18, -0.51]]]]) # Structural symmetries tested using BaNiO3 piezo/elastic tensors self.fit_r3 = Tensor([[[0., 0., 0.03839], [0., 0., 0.], [0.03839, 0., 0.]], [[0., 0., 0.], [0., 0., 0.03839], [0., 0.03839, 0.]], [[6.89822, 0., 0.], [0., 6.89822, 0.], [0., 0., 27.4628]]]) self.fit_r4 = Tensor([[[[157.9, 0., 0.], [0., 63.1, 0.], [0., 0., 29.4]], [[0., 47.4, 0.], [47.4, 0., 0.], [0., 0., 0.]], [[0., 0., 4.3], [0., 0., 0.], [4.3, 0., 0.]]], [[[0., 47.4, 0.], [47.4, 0., 0.], [0., 0., 0.]], [[63.1, 0., 0.], [0., 157.9, 0.], [0., 0., 29.4]], [[0., 0., 0.], [0., 0., 4.3], [0., 4.3, 0.]]], [[[0., 0., 4.3], [0., 0., 0.], [4.3, 0., 0.]], [[0., 0., 0.], [0., 0., 4.3], [0., 4.3, 0.]], [[29.4, 0., 0.], [0., 29.4, 0.], [0., 0., 207.6]]]]) self.unfit4 = Tensor([[[[161.26, 0., 0.], [0., 62.76, 0.], [0., 0., 30.18]], [[0., 47.08, 0.], [47.08, 0., 0.], [0., 0., 0.]], [[0., 0., 4.23], [0., 0., 0.], [4.23, 0., 0.]]], [[[0., 47.08, 0.], [47.08, 0., 0.], [0., 0., 0.]], [[62.76, 0., 0.], [0., 155.28, -0.06], [0., -0.06, 28.53]], [[0., 0., 0.], [0., -0.06, 4.44], [0., 4.44, 0.]]], [[[0., 0., 4.23], [0., 0., 0.], [4.23, 0., 0.]], [[0., 0., 0.], [0., -0.06, 4.44], [0., 4.44, 0.]], [[30.18, 0., 0.], [0., 28.53, 0.], [0., 0., 207.57]]]]) self.structure = self.get_structure('BaNiO3') ieee_file_path = os.path.join(test_dir, "ieee_conversion_data.json") with open(ieee_file_path) as f: self.ieee_data = json.load(f) def test_new(self): bad_2 = np.zeros((4, 4)) bad_3 = np.zeros((4, 4, 4)) self.assertRaises(ValueError, Tensor, bad_2) self.assertRaises(ValueError, Tensor, bad_3) self.assertEqual(self.rand_rank2.rank, 2) self.assertEqual(self.rand_rank3.rank, 3) self.assertEqual(self.rand_rank4.rank, 4) def test_zeroed(self): self.assertArrayEqual(self.low_val.zeroed(), Tensor([[0, 1 + 1e-5, 0], [1 + 1e-6, 0, 0], [0, 0, 1 + 1e-5]])) 
self.assertArrayEqual(self.low_val.zeroed(tol=1e-6), Tensor([[1e-6, 1 + 1e-5, 1e-6], [1 + 1e-6, 1e-6, 1e-6], [0, 0, 1 + 1e-5]])) self.assertArrayEqual(Tensor([[1e-6, -30, 1], [1e-7, 1, 0], [1e-8, 0, 1]]).zeroed(), Tensor([[0, -30, 1], [0, 1, 0], [0, 0, 1]])) def test_transform(self): # Rank 3 tensor = Tensor(np.arange(0, 27).reshape(3, 3, 3)) symm_op = SymmOp.from_axis_angle_and_translation([0, 0, 1], 30, False, [0, 0, 1]) new_tensor = tensor.transform(symm_op) self.assertArrayAlmostEqual(new_tensor, [[[-0.871, -2.884, -1.928], [-2.152, -6.665, -4.196], [-1.026, -2.830, -1.572]], [[0.044, 1.531, 1.804], [4.263, 21.008, 17.928], [5.170, 23.026, 18.722]], [[1.679, 7.268, 5.821], [9.268, 38.321, 29.919], [8.285, 33.651, 26.000]]], 3) def test_rotate(self): self.assertArrayEqual(self.vec.rotate([[0, -1, 0], [1, 0, 0], [0, 0, 1]]), [0, 1, 0]) self.assertArrayAlmostEqual(self.non_symm.rotate(self.rotation), SquareTensor([[0.531, 0.485, 0.271], [0.700, 0.5, 0.172], [0.171, 0.233, 0.068]]), decimal=3) self.assertRaises(ValueError, self.non_symm.rotate, self.symm_rank2) def test_symmetrized(self): self.assertTrue(self.rand_rank2.symmetrized.is_symmetric()) self.assertTrue(self.rand_rank3.symmetrized.is_symmetric()) self.assertTrue(self.rand_rank4.symmetrized.is_symmetric()) def test_is_symmetric(self): self.assertTrue(self.symm_rank2.is_symmetric()) self.assertTrue(self.symm_rank3.is_symmetric()) self.assertTrue(self.symm_rank4.is_symmetric()) tol_test = self.symm_rank4 tol_test[0, 1, 2, 2] += 1e-6 self.assertFalse(self.low_val.is_symmetric(tol=1e-8)) def test_fit_to_structure(self): new_fit = self.unfit4.fit_to_structure(self.structure) self.assertArrayAlmostEqual(new_fit, self.fit_r4, 1) def test_is_fit_to_structure(self): self.assertFalse(self.unfit4.is_fit_to_structure(self.structure)) self.assertTrue(self.fit_r3.is_fit_to_structure(self.structure)) self.assertTrue(self.fit_r4.is_fit_to_structure(self.structure)) def test_convert_to_ieee(self): for entry in self.ieee_data: xtal = entry['xtal'] orig = Tensor(entry['original_tensor']) ieee = Tensor(entry['ieee_tensor']) struct = Structure.from_dict(entry['structure']) diff = np.max(abs(ieee - orig.convert_to_ieee(struct))) err_msg = "{} IEEE conversion failed with max diff {}. 
Numpy version: {}".format( xtal, diff, np.__version__) print(ieee) print(orig.convert_to_ieee(struct)) self.assertArrayAlmostEqual(ieee, orig.convert_to_ieee(struct), err_msg=err_msg, decimal=3) def test_from_voigt(self): with self.assertRaises(ValueError): Tensor.from_voigt([[59.33, 28.08, 28.08, 0], [28.08, 59.31, 28.07, 0], [28.08, 28.07, 59.32, 0, 0], [0, 0, 0, 26.35, 0], [0, 0, 0, 0, 26.35]]) # Rank 4 Tensor.from_voigt([[59.33, 28.08, 28.08, 0, 0, 0], [28.08, 59.31, 28.07, 0, 0, 0], [28.08, 28.07, 59.32, 0, 0, 0], [0, 0, 0, 26.35, 0, 0], [0, 0, 0, 0, 26.35, 0], [0, 0, 0, 0, 0, 26.35]]) # Rank 3 Tensor.from_voigt(np.zeros((3, 6))) # Rank 2 Tensor.from_voigt(np.zeros(6)) # Addresses occasional cast issues for integers Tensor.from_voigt(np.arange(6)) def test_symmetry_reduce(self): tbs = [Tensor.from_voigt(row) for row in np.eye(6)*0.01] reduced = symmetry_reduce(tbs, self.get_structure("Sn")) self.assertEqual(len(reduced), 2) self.assertArrayEqual([len(i) for i in reduced.values()], [2, 2]) reconstructed = [] for k, v in reduced.items(): reconstructed.extend([k.voigt] + [k.transform(op).voigt for op in v]) reconstructed = sorted(reconstructed, key = lambda x: np.argmax(x)) self.assertArrayAlmostEqual([tb for tb in reconstructed], np.eye(6)*0.01) class TensorCollectionTest(PymatgenTest): def setUp(self): self.seq_tc = [t for t in np.arange(4*3**3).reshape((4, 3, 3, 3))] self.seq_tc = TensorCollection(self.seq_tc) self.rand_tc = TensorCollection([t for t in np.random.random((4, 3, 3))]) self.diff_rank = TensorCollection([np.ones([3]*i) for i in range(2, 5)]) self.struct = self.get_structure("Si") ieee_file_path = os.path.join(test_dir, "ieee_conversion_data.json") with open(ieee_file_path) as f: self.ieee_data = json.load(f) def list_based_function_check(self, attribute, coll, *args, **kwargs): """ This function allows for more efficient testing of list-based functions in a "collection"-style class like TensorCollection It ensures that the test function """ tc_orig = TensorCollection(coll) tc_mod = getattr(tc_orig, attribute) if callable(tc_mod): tc_mod = tc_mod(*args, **kwargs) for t_orig, t_mod in zip(tc_orig, tc_mod): this_mod = getattr(t_orig, attribute) if callable(this_mod): this_mod = this_mod(*args, **kwargs) if isinstance(this_mod, np.ndarray): self.assertArrayAlmostEqual(this_mod, t_mod) def test_list_based_functions(self): # zeroed tc = TensorCollection([1e-4*Tensor(np.eye(3))]*4) for t in tc.zeroed(): self.assertArrayEqual(t, np.zeros((3, 3))) for t in tc.zeroed(1e-5): self.assertArrayEqual(t, 1e-4*np.eye(3)) self.list_based_function_check("zeroed", tc) self.list_based_function_check("zeroed", tc, tol=1e-5) # transform symm_op = SymmOp.from_axis_angle_and_translation([0, 0, 1], 30, False, [0, 0, 1]) self.list_based_function_check("transform", self.seq_tc, symm_op=symm_op) # symmetrized self.list_based_function_check("symmetrized", self.seq_tc) # rotation a = 3.14 * 42.5 / 180 rotation = SquareTensor([[math.cos(a), 0, math.sin(a)], [0, 1, 0], [-math.sin(a), 0, math.cos(a)]]) self.list_based_function_check("rotate", self.diff_rank, matrix=rotation) # is_symmetric self.assertFalse(self.seq_tc.is_symmetric()) self.assertTrue(self.diff_rank.is_symmetric()) # fit_to_structure self.list_based_function_check("fit_to_structure", self.diff_rank, self.struct) self.list_based_function_check("fit_to_structure", self.seq_tc, self.struct) # fit_to_structure self.list_based_function_check("fit_to_structure", self.diff_rank, self.struct) self.list_based_function_check("fit_to_structure", 
self.seq_tc, self.struct) # voigt self.list_based_function_check("voigt", self.diff_rank) # is_voigt_symmetric self.assertTrue(self.diff_rank.is_voigt_symmetric()) self.assertFalse(self.seq_tc.is_voigt_symmetric()) # Convert to ieee for entry in self.ieee_data[:2]: xtal = entry['xtal'] tc = TensorCollection([entry['original_tensor']]*3) struct = Structure.from_dict(entry['structure']) self.list_based_function_check("convert_to_ieee", tc, struct) # from_voigt tc_input = [t for t in np.random.random((3, 6, 6))] tc = TensorCollection.from_voigt(tc_input) for t_input, t in zip(tc_input, tc): self.assertArrayAlmostEqual(Tensor.from_voigt(t_input), t) class SquareTensorTest(PymatgenTest): def setUp(self): self.rand_sqtensor = SquareTensor(np.random.randn(3, 3)) self.symm_sqtensor = SquareTensor([[0.1, 0.3, 0.4], [0.3, 0.5, 0.2], [0.4, 0.2, 0.6]]) self.non_invertible = SquareTensor([[0.1, 0, 0], [0.2, 0, 0], [0, 0, 0]]) self.non_symm = SquareTensor([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6], [0.2, 0.5, 0.5]]) self.low_val = SquareTensor([[1e-6, 1 + 1e-5, 1e-6], [1 + 1e-6, 1e-6, 1e-6], [1e-7, 1e-7, 1 + 1e-5]]) self.low_val_2 = SquareTensor([[1e-6, -1 - 1e-6, 1e-6], [1 + 1e-7, 1e-6, 1e-6], [1e-7, 1e-7, 1 + 1e-6]]) a = 3.14 * 42.5 / 180 self.rotation = SquareTensor([[math.cos(a), 0, math.sin(a)], [0, 1, 0], [-math.sin(a), 0, math.cos(a)]]) def test_new(self): non_sq_matrix = [[0.1, 0.2, 0.1], [0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.1, 0.1]] bad_matrix = [[0.1, 0.2], [0.2, 0.3, 0.4], [0.2, 0.3, 0.5]] too_high_rank = np.zeros((3,3,3)) self.assertRaises(ValueError, SquareTensor, non_sq_matrix) self.assertRaises(ValueError, SquareTensor, bad_matrix) self.assertRaises(ValueError, SquareTensor, too_high_rank) def test_properties(self): # transpose self.assertArrayEqual(self.non_symm.trans, SquareTensor([[0.1, 0.4, 0.2], [0.2, 0.5, 0.5], [0.3, 0.6, 0.5]])) self.assertArrayEqual(self.rand_sqtensor.trans, np.transpose(self.rand_sqtensor)) self.assertArrayEqual(self.symm_sqtensor, self.symm_sqtensor.trans) # inverse self.assertArrayEqual(self.non_symm.inv, np.linalg.inv(self.non_symm)) with self.assertRaises(ValueError): self.non_invertible.inv # determinant self.assertEqual(self.rand_sqtensor.det, np.linalg.det(self.rand_sqtensor)) self.assertEqual(self.non_invertible.det, 0.0) self.assertEqual(self.non_symm.det, 0.009) # symmetrized self.assertArrayEqual(self.rand_sqtensor.symmetrized, 0.5 * (self.rand_sqtensor + self.rand_sqtensor.trans)) self.assertArrayEqual(self.symm_sqtensor, self.symm_sqtensor.symmetrized) self.assertArrayAlmostEqual(self.non_symm.symmetrized, SquareTensor([[0.1, 0.3, 0.25], [0.3, 0.5, 0.55], [0.25, 0.55, 0.5]])) # invariants i1 = np.trace(self.rand_sqtensor) i2 = self.rand_sqtensor[0, 0] * self.rand_sqtensor[1, 1] + \ self.rand_sqtensor[1, 1] * self.rand_sqtensor[2, 2] + \ self.rand_sqtensor[2, 2] * self.rand_sqtensor[0, 0] - \ self.rand_sqtensor[0, 1] * self.rand_sqtensor[1, 0] - \ self.rand_sqtensor[0, 2] * self.rand_sqtensor[2, 0] - \ self.rand_sqtensor[2, 1] * self.rand_sqtensor[1, 2] i3 = np.linalg.det(self.rand_sqtensor) self.assertArrayAlmostEqual([i1, i2, i3], self.rand_sqtensor.principal_invariants) def test_is_rotation(self): self.assertTrue(self.rotation.is_rotation()) self.assertFalse(self.symm_sqtensor.is_rotation()) self.assertTrue(self.low_val_2.is_rotation()) self.assertFalse(self.low_val_2.is_rotation(tol=1e-8)) def test_get_scaled(self): self.assertArrayEqual(self.non_symm.get_scaled(10.), SquareTensor([[1, 2, 3], [4, 5, 6], [2, 5, 5]])) def test_polar_decomposition(self): 
        u, p = self.rand_sqtensor.polar_decomposition()
        self.assertArrayAlmostEqual(np.dot(u, p), self.rand_sqtensor)
        self.assertArrayAlmostEqual(np.eye(3),
                                    np.dot(u, np.conjugate(np.transpose(u))))


if __name__ == '__main__':
    unittest.main()
tallakahath/pymatgen
pymatgen/analysis/elasticity/tests/test_tensors.py
Python
mit
21,511
[ "pymatgen" ]
84a14901636b0ddd705809956203a004aad9aa29adc17ffd9ebbd128232bac12
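The from_voigt / voigt round trip exercised in test_from_voigt above can be condensed into a few lines. This is a sketch assuming pymatgen is importable under the same module path the test uses; the 6x6 stiffness values are the ones appearing in the test.

import numpy as np
from pymatgen.analysis.elasticity.tensors import Tensor

c_voigt = np.array([[59.33, 28.08, 28.08, 0, 0, 0],
                    [28.08, 59.31, 28.07, 0, 0, 0],
                    [28.08, 28.07, 59.32, 0, 0, 0],
                    [0, 0, 0, 26.35, 0, 0],
                    [0, 0, 0, 0, 26.35, 0],
                    [0, 0, 0, 0, 0, 26.35]])

c_full = Tensor.from_voigt(c_voigt)                 # expands to a rank-4 3x3x3x3 tensor
assert c_full.shape == (3, 3, 3, 3)
np.testing.assert_allclose(c_full.voigt, c_voigt)   # .voigt maps back to the 6x6 form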
# coding=utf-8 # Copyright 2022 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # pylint: disable=g-doc-return-or-yield,missing-docstring,unused-import,line-too-long,invalid-name,pointless-string-statement """Global config used for synchronization in ES-MAML. A class whose attributes contain hyperparameter values and functions based on those hyperparameters. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import copy import datetime import os import numpy as np from es_maml import blackbox_maml_objects from es_maml import policies from es_maml import task from es_maml.blackbox import blackbox_optimization_algorithms from es_maml.blackbox import regression_optimizers from es_maml.zero_order import adaptation_optimizers class Config(object): def __init__(self, **kwargs): for k, v in kwargs.items(): setattr(self, k, v) def get_config(**kwargs): config = Config(**kwargs) config.folder_name = "./es_maml_logs/" config.run_locally = False config.num_queries = 20 config.task_batch_size = 1 config.train_set_size = 50 config.test_set_size = 50 config.num_rollouts_per_parameter = 1 config.es_step_size = 0.01 config.alpha = 0.05 config.hidden_layers = [] config.nb_iterations = 10000 config.fvalues_normalization = True config.algorithm = "zero_order" # "zeroth_order" # "first_order" if config.algorithm == "zero_order": config.total_num_perturbations = 150 config.num_exact_evals = 100 config.num_repeats = 1 config.nb_perturbations_per_worker = 1 config.es_precision_parameter = 0.1 config.adaptation_precision_parameter = 0.1 config.hyperparameters_update_method = "state_normalization" config.es_est_type = "antithetic" config.adaptation_est_type = "antithetic" if config.es_est_type == "forward_fd": config.train_workers = config.total_num_perturbations * config.num_repeats + config.num_repeats + config.num_exact_evals elif config.es_est_type == "antithetic": config.train_workers = 2 * config.total_num_perturbations * config.num_repeats + config.num_repeats + config.num_exact_evals config.test_workers = 50 assert config.test_set_size == config.test_workers config.test_parallel_evals = 1 config.num_servers = config.train_workers + config.test_workers config.adaptation_string = "MC" # "SKLRegression" # "MC" # "GeneralRegression" config.regression_optimizer_string = "l1_jacobian_decoder" config.regularizer = 0.0 config.perturbation_type = "Gaussian" # "Gaussian" # "DPP" config.dpp_rho = 5 config.adapter_fn_string = "BlackboxAdaptation" # "BlackboxAdaptation" # "HillClimbAdaptation" config.hillclimb_parallel_alg = "average" # "batch" # "average" config.hillclimb_parallel_evaluations = 1 elif config.algorithm == "first_order": config.use_hess = False config.work_split = "perturbation_per_worker" config.num_perturbations = 300 # for per-perturbation work_split config.num_servers = config.task_batch_size * (config.num_perturbations + 1) config.rollout_repeats = 1 config.antithetic = False config.precision_parameter = 0.1 
config.hyperparameters_update_method = "None" config.test_frequency = 5 config.adapter_fn_string = "None" config.critical = 0.4 config.task_name = "NavigationTask2d" config.combo_task_num_subset_goals = 1 config.combo_task_num_goals = 4 config.test_frequency = 10 config.horizon = 200 if config.task_name in ["NavigationTask2d", "NavigationTask4corner"]: config.horizon = 100 elif config.task_name in ["NavigationTaskCombo"]: config.horizon = 100 return config def generate_config(config, **kwargs): current_time_string = kwargs.get("current_time_string", "NA") config.json_hparams = copy.deepcopy(config.__dict__) def make_task_fn(task_id): return getattr(task, config.task_name)( task_id, num_subset_goals=config.combo_task_num_subset_goals, num_goals=config.combo_task_num_goals) config.make_task_fn = make_task_fn if config.algorithm == "zero_order": def es_blackbox_optimizer_fn(metaparams): return blackbox_optimization_algorithms.MCBlackboxOptimizer( config.es_precision_parameter, config.es_est_type, config.fvalues_normalization, config.hyperparameters_update_method, metaparams, config.es_step_size, num_top_directions=0) config.es_blackbox_optimizer_fn = es_blackbox_optimizer_fn def adaptation_blackbox_optimizer_fn(metaparams): if config.adaptation_string == "MC": return blackbox_optimization_algorithms.MCBlackboxOptimizer( config.adaptation_precision_parameter, config.adaptation_est_type, config.fvalues_normalization, config.hyperparameters_update_method, metaparams, config.alpha, num_top_directions=0) elif config.adaptation_string == "SKLRegression": return blackbox_optimization_algorithms.SklearnRegressionBlackboxOptimizer( "lasso", config.regularizer, config.est_type, config.fvalues_normalization, config.hyperparameters_update_method, metaparams, config.alpha) elif config.adaptation_string == "GeneralRegression": return blackbox_optimization_algorithms.GeneralRegressionBlackboxOptimizer( regression_method=getattr(regression_optimizers, config.regression_optimizer_string), regularizer=config.regularizer, est_type=config.adaptation_est_type, normalize_fvalues=config.fvalues_normalization, hyperparameters_update_method=config.hyperparameters_update_method, extra_params=metaparams, step_size=config.alpha) config.adaptation_blackbox_optimizer_fn = adaptation_blackbox_optimizer_fn temp_env = make_task_fn(0) config.state_dimensionality = temp_env.state_dimensionality() config.action_dimensionality = temp_env.action_dimensionality() def rl_policy_fn(): return policies.DeterministicNumpyPolicy(config.state_dimensionality, config.action_dimensionality, config.hidden_layers) config.rl_policy_fn = rl_policy_fn def RLMAMLBlackboxObject_fn(): return blackbox_maml_objects.RLMAMLBlackboxObject(config) config.RLMAMLBlackboxObject_fn = RLMAMLBlackboxObject_fn def sl_policy_fn(): return policies.Basic_TF_Policy(config.state_dimensionality, config.action_dimensionality, config.hidden_layers, "sl") config.sl_policy_fn = sl_policy_fn def LossTensorMAMLBlackboxObject_fn(): return blackbox_maml_objects.LossTensorMAMLBlackboxObject(config) config.LossTensorMAMLBlackboxObject_fn = LossTensorMAMLBlackboxObject_fn def blackbox_object_fn(): blackbox_object = config.RLMAMLBlackboxObject_fn() policy_param_dim = blackbox_object.policy_param_num if config.algorithm == "zero_order": adaptation_blackbox_optimizer = config.adaptation_blackbox_optimizer_fn( blackbox_object.get_metaparams()) adapter_fn = getattr(adaptation_optimizers, config.adapter_fn_string) adapter = adapter_fn( num_queries=config.num_queries, 
adaptation_blackbox_optimizer=adaptation_blackbox_optimizer, adaptation_precision_parameter=config.adaptation_precision_parameter, policy_param_dim=policy_param_dim, perturbation_type=config.perturbation_type, dpp_rho=config.dpp_rho, parallel_alg=config.hillclimb_parallel_alg, parallel_evaluations=config.hillclimb_parallel_evaluations) blackbox_object.use_adapter(adapter) return blackbox_object config.blackbox_object_fn = blackbox_object_fn if config.hidden_layers: hidden_layers_name = "H" + "_".join([str(h) for h in config.hidden_layers]) else: hidden_layers_name = "L" local_logfoldername = "_".join([ config.task_name, hidden_layers_name, "Q" + str(config.num_queries), current_time_string, config.adapter_fn_string ]) config.global_logfoldername = os.path.join(config.folder_name, local_logfoldername) config.hparams_file = os.path.join(config.global_logfoldername, "hparams.json") config.log_frequency = 1 config.params_file = os.path.join(config.global_logfoldername, "params.csv") config.best_params_file = os.path.join(config.global_logfoldername, "best_params.csv") config.best_core_hyperparameters_file = os.path.join( config.global_logfoldername, "best_core_hyperparams.csv") config.best_value_file = os.path.join(config.global_logfoldername, "best_value.csv") config.optimizer_internal_state_file = os.path.join( config.global_logfoldername, "optimizer_internal_state.csv") config.current_values_list_file = os.path.join(config.global_logfoldername, "current_values_list.csv") config.best_values_list_file = os.path.join(config.global_logfoldername, "best_values_list.csv") config.plot_file = os.path.join(config.global_logfoldername, "plot.csv") config.fvalues_file = os.path.join(config.global_logfoldername, "fvalues.csv") config.iteration_file = os.path.join(config.global_logfoldername, "iteration.csv") config.test_values_file = os.path.join(config.global_logfoldername, "test_values_file.csv") config.mamlpt_values_file = os.path.join(config.global_logfoldername, "mamlpt_values_file.csv") config.test_mamlpt_parallel_vals_folder = os.path.join( config.global_logfoldername, "test_mamlpt_parallel_evals_folder") return config
google-research/google-research
es_maml/config.py
Python
apache-2.0
10,748
[ "Gaussian" ]
4dfe96e3a32c6f6168294b8bf665890b56ba63fb4796ebafc89df1e75373253f
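Config above is just an attribute bag: keyword arguments become attributes, and get_config / generate_config then hang further hyperparameters and factory functions off the same object. A minimal illustration, with arbitrary example values:

from es_maml.config import Config

config = Config(algorithm="zero_order", num_queries=20, es_step_size=0.01)
print(config.algorithm, config.num_queries)   # -> zero_order 20
config.folder_name = "./es_maml_logs/"        # attributes can also be attached afterwards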
# -*- coding: utf-8 -*- # vim: tabstop=4 shiftwidth=4 softtabstop=4 # # Copyright (C) 2012-2017 GEM Foundation # # OpenQuake is free software: you can redistribute it and/or modify it # under the terms of the GNU Affero General Public License as published # by the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # OpenQuake is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with OpenQuake. If not, see <http://www.gnu.org/licenses/>. """ Module exports :class:`ChiouYoungs2014`. """ from __future__ import division import numpy as np import math from openquake.hazardlib.gsim.base import GMPE, CoeffsTable from openquake.hazardlib import const from openquake.hazardlib.imt import PGA, PGV, SA class ChiouYoungs2014(GMPE): """ Implements GMPE developed by Brian S.-J. Chiou and Robert R. Youngs and published as "Updated of the Chiou and Youngs NGA Model for the Average Horizontal Component of Peak Ground Motion and Response Spectra" (2014, Earthquake Spectra). """ #: Supported tectonic region type is active shallow crust DEFINED_FOR_TECTONIC_REGION_TYPE = const.TRT.ACTIVE_SHALLOW_CRUST #: Supported intensity measure types are spectral acceleration, #: peak ground velocity and peak ground acceleration DEFINED_FOR_INTENSITY_MEASURE_TYPES = set([ PGA, PGV, SA ]) #: Supported intensity measure component is orientation-independent #: measure :attr:`~openquake.hazardlib.const.IMC.RotD50`, DEFINED_FOR_INTENSITY_MEASURE_COMPONENT = const.IMC.RotD50 #: Supported standard deviation types are inter-event, intra-event #: and total, see chapter "Variance model". DEFINED_FOR_STANDARD_DEVIATION_TYPES = set([ const.StdDev.TOTAL, const.StdDev.INTER_EVENT, const.StdDev.INTRA_EVENT ]) #: Required site parameters are Vs30, Vs30 measured flag #: and Z1.0. REQUIRES_SITES_PARAMETERS = set(('vs30', 'vs30measured', 'z1pt0')) #: Required rupture parameters are magnitude, rake, #: dip and ztor. REQUIRES_RUPTURE_PARAMETERS = set(('dip', 'rake', 'mag', 'ztor')) #: Required distance measures are RRup, Rjb and Rx. REQUIRES_DISTANCES = set(('rrup', 'rjb', 'rx')) def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types): """ See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values. """ # extracting dictionary of coefficients specific to required # intensity measure type. C = self.COEFFS[imt] # intensity on a reference soil is used for both mean # and stddev calculations. ln_y_ref = self._get_ln_y_ref(rup, dists, C) # exp1 and exp2 are parts of eq. 12 and eq. 13, # calculate it once for both. exp1 = np.exp(C['phi3'] * (sites.vs30.clip(-np.inf, 1130) - 360)) exp2 = np.exp(C['phi3'] * (1130 - 360)) mean = self._get_mean(sites, C, ln_y_ref, exp1, exp2) stddevs = self._get_stddevs(sites, rup, C, stddev_types, ln_y_ref, exp1, exp2) return mean, stddevs def _get_mean(self, sites, C, ln_y_ref, exp1, exp2): """ Add site effects to an intensity. Implements eq. 13b. """ # we do not support estimating of basin depth and instead # rely on it being available (since we require it). # centered_z1pt0 centered_z1pt0 = self._get_centered_z1pt0(sites) # we consider random variables being zero since we want # to find the exact mean value. eta = epsilon = 0. 
ln_y = ( # first line of eq. 12 ln_y_ref + eta # second line + C['phi1'] * np.log(sites.vs30 / 1130).clip(-np.inf, 0) # third line + C['phi2'] * (exp1 - exp2) * np.log((np.exp(ln_y_ref) * np.exp(eta) + C['phi4']) / C['phi4']) # fourth line + C['phi5'] * (1.0 - np.exp(-1. * centered_z1pt0 / C['phi6'])) # fifth line + epsilon ) return ln_y def _get_stddevs(self, sites, rup, C, stddev_types, ln_y_ref, exp1, exp2): """ Get standard deviation for a given intensity on reference soil. Implements equations 13 for inter-event, intra-event and total standard deviations. """ Fmeasured = sites.vs30measured Finferred = 1 - sites.vs30measured # eq. 13 to calculate inter-event standard error mag_test = min(max(rup.mag, 5.0), 6.5) - 5.0 tau = C['tau1'] + (C['tau2'] - C['tau1']) / 1.5 * mag_test # b and c coeffs from eq. 10 b = C['phi2'] * (exp1 - exp2) c = C['phi4'] y_ref = np.exp(ln_y_ref) # eq. 13 NL = b * y_ref / (y_ref + c) sigma = ((C['sig1'] + (C['sig2'] - C['sig1']) * mag_test / 1.5) * np.sqrt((C['sig3'] * Finferred + 0.7 * Fmeasured) + (1. + NL) ** 2.)) ret = [] for stddev_type in stddev_types: assert stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES if stddev_type == const.StdDev.TOTAL: # eq. 13 ret += [np.sqrt(((1 + NL) ** 2) * (tau ** 2) + (sigma ** 2))] elif stddev_type == const.StdDev.INTRA_EVENT: ret.append(sigma) elif stddev_type == const.StdDev.INTER_EVENT: # this is implied in eq. 21 ret.append(np.abs((1 + NL) * tau)) return ret def _get_ln_y_ref(self, rup, dists, C): """ Get an intensity on a reference soil. Implements eq. 13a. """ # reverse faulting flag Frv = 1. if 30 <= rup.rake <= 150 else 0. # normal faulting flag Fnm = 1. if -120 <= rup.rake <= -60 else 0. # hanging wall flag Fhw = np.zeros_like(dists.rx) idx = np.nonzero(dists.rx >= 0.) Fhw[idx] = 1. # a part in eq. 11 mag_test1 = np.cosh(2. * max(rup.mag - 4.5, 0)) # centered DPP centered_dpp = self._get_centered_cdpp(dists) # centered_ztor centered_ztor = self._get_centered_ztor(rup, Frv) # dist_taper = np.fmax(1 - (np.fmax(dists.rrup - 40, np.zeros_like(dists)) / 30.), np.zeros_like(dists)) dist_taper = dist_taper.astype(np.float64) ln_y_ref = ( # first part of eq. 11 C['c1'] + (C['c1a'] + C['c1c'] / mag_test1) * Frv + (C['c1b'] + C['c1d'] / mag_test1) * Fnm + (C['c7'] + C['c7b'] / mag_test1) * centered_ztor + (C['c11'] + C['c11b'] / mag_test1) * np.cos(math.radians(rup.dip)) ** 2 # second part + C['c2'] * (rup.mag - 6) + ((C['c2'] - C['c3']) / C['cn']) * np.log(1 + np.exp(C['cn'] * (C['cm'] - rup.mag))) # third part + C['c4'] * np.log(dists.rrup + C['c5'] * np.cosh(C['c6'] * max(rup.mag - C['chm'], 0))) + (C['c4a'] - C['c4']) * np.log(np.sqrt(dists.rrup ** 2 + C['crb'] ** 2)) # forth part + (C['cg1'] + C['cg2'] / (np.cosh(max(rup.mag - C['cg3'], 0)))) * dists.rrup # fifth part + C['c8'] * dist_taper * min(max(rup.mag - 5.5, 0) / 0.8, 1.0) * np.exp(-1 * C['c8a'] * (rup.mag - C['c8b']) ** 2) * centered_dpp # sixth part + C['c9'] * Fhw * np.cos(math.radians(rup.dip)) * (C['c9a'] + (1 - C['c9a']) * np.tanh(dists.rx / C['c9b'])) * (1 - np.sqrt(dists.rjb ** 2 + rup.ztor ** 2) / (dists.rrup + 1.0)) ) return ln_y_ref def _get_centered_z1pt0(self, sites): """ Get z1pt0 centered on the Vs30- dependent avarage z1pt0(m) California and non-Japan regions """ #: California and non-Japan regions mean_z1pt0 = (-7.15 / 4.) * np.log(((sites.vs30) ** 4. + 570.94 ** 4.) / (1360 ** 4. 
+ 570.94 ** 4.)) centered_z1pt0 = sites.z1pt0 - np.exp(mean_z1pt0) return centered_z1pt0 def _get_centered_ztor(self, rup, Frv): """ Get ztor centered on the M- dependent avarage ztor(km) by different fault types. """ if Frv == 1: mean_ztor = max(2.704 - 1.226 * max(rup.mag - 5.849, 0.0), 0.) ** 2 centered_ztor = rup.ztor - mean_ztor else: mean_ztor = max(2.673 - 1.136 * max(rup.mag - 4.970, 0.0), 0.) ** 2 centered_ztor = rup.ztor - mean_ztor return centered_ztor def _get_centered_cdpp(self, dists): """ Get directivity prediction parameter centered on the avgerage directivity prediction parameter. Here we set the centered_dpp equals to zero, since the near fault directivity effect prediction is off in our calculation. """ centered_dpp = 0. return centered_dpp #: Coefficient tables are constructed from values in tables 1 - 5 COEFFS = CoeffsTable(sa_damping=5, table="""\ IMT c1 c1a c1b c1c c1d cn cm c2 c3 c4 c4a crb c5 chm c6 c7 c7b c8 c8a c8b c9 c9a c9b c11 c11b cg1 cg2 cg3 phi1 phi2 phi3 phi4 phi5 phi6 gjpit gwn phi1jp phi5jp phi6jp tau1 tau2 sig1 sig2 sig3 sig2jp pga -1.5065 0.165 -0.255 -0.165 0.255 16.0875 4.9993 1.06 1.9636 -2.1 -0.5 50 6.4551 3.0956 0.4908 0.0352 0.0462 0. 0.2695 0.4833 0.9228 0.1202 6.8607 0. -0.4536 -0.007146 -0.006758 4.2542 -0.521 -0.1417 -0.00701 0.102151 0. 300 1.5817 0.7594 -0.6846 0.459 800. 0.4 0.26 0.4912 0.3762 0.8 0.4528 pgv 2.3549 0.165 -0.0626 -0.165 0.0626 3.3024 5.423 1.06 2.3152 -2.1 -0.5 50 5.8096 3.0514 0.4407 0.0324 0.0097 0.2154 0.2695 5. 0.3079 0.1 6.5 0 -0.3834 -0.001852 -0.007403 4.3439 -0.7936 -0.0699 -0.008444 5.41 0.0202 300. 2.2306 0.335 -0.7966 0.9488 800. 0.3894 0.2578 0.4785 0.3629 0.7504 0.3918 0.01 -1.5065 0.165 -0.255 -0.165 0.255 16.0875 4.9993 1.06 1.9636 -2.1 -0.5 50 6.4551 3.0956 0.4908 0.0352 0.0462 0. 0.2695 0.4833 0.9228 0.1202 6.8607 0. -0.4536 -0.007146 -0.006758 4.2542 -0.521 -0.1417 -0.00701 0.102151 0. 300 1.5817 0.7594 -0.6846 0.459 800. 0.4 0.26 0.4912 0.3762 0.8 0.4528 0.02 -1.4798 0.165 -0.255 -0.165 0.255 15.7118 4.9993 1.06 1.9636 -2.1 -0.5 50 6.4551 3.0963 0.4925 0.0352 0.0472 0. 0.2695 1.2144 0.9296 0.1217 6.8697 0. -0.4536 -0.007249 -0.006758 4.2386 -0.5055 -0.1364 -0.007279 0.10836 0. 300 1.574 0.7606 -0.6681 0.458 800. 0.4026 0.2637 0.4904 0.3762 0.8 0.4551 0.03 -1.2972 0.165 -0.255 -0.165 0.255 15.8819 4.9993 1.06 1.9636 -2.1 -0.5 50 6.4551 3.0974 0.4992 0.0352 0.0533 0. 0.2695 1.6421 0.9396 0.1194 6.9113 0. -0.4536 -0.007869 -0.006758 4.2519 -0.4368 -0.1403 -0.007354 0.119888 0. 300 1.5544 0.7642 -0.6314 0.462 800. 0.4063 0.2689 0.4988 0.3849 0.8 0.4571 0.04 -1.1007 0.165 -0.255 -0.165 0.255 16.4556 4.9993 1.06 1.9636 -2.1 -0.5 50 6.4551 3.0988 0.5037 0.0352 0.0596 0. 0.2695 1.9456 0.9661 0.1166 7.0271 0. -0.4536 -0.008316 -0.006758 4.296 -0.3752 -0.1591 -0.006977 0.133641 0. 300 1.5502 0.7676 -0.5855 0.453 800. 0.4095 0.2736 0.5049 0.391 0.8 0.4642 0.05 -0.9292 0.165 -0.255 -0.165 0.255 17.6453 4.9993 1.06 1.9636 -2.1 -0.5 50 6.4551 3.1011 0.5048 0.0352 0.0639 0. 0.2695 2.181 0.9794 0.1176 7.0959 0. -0.4536 -0.008743 -0.006758 4.3578 -0.3469 -0.1862 -0.006467 0.148927 0. 300 1.5391 0.7739 -0.5457 0.436 800. 0.4124 0.2777 0.5096 0.3957 0.8 0.4716 0.075 -0.658 0.165 -0.254 -0.165 0.254 20.1772 5.0031 1.06 1.9636 -2.1 -0.5 50 6.4551 3.1094 0.5048 0.0352 0.063 0. 0.2695 2.6087 1.026 0.1171 7.3298 0. -0.4536 -0.009537 -0.00619 4.5455 -0.3747 -0.2538 -0.005734 0.190596 0. 300 1.4804 0.7956 -0.4685 0.383 800. 
0.4179 0.2855 0.5179 0.4043 0.8 0.5022 0.1 -0.5613 0.165 -0.253 -0.165 0.253 19.9992 5.0172 1.06 1.9636 -2.1 -0.5 50 6.8305 3.2381 0.5048 0.0352 0.0532 0. 0.2695 2.9122 1.0177 0.1146 7.2588 0. -0.4536 -0.00983 -0.005332 4.7603 -0.444 -0.2943 -0.005604 0.230662 0. 300 1.4094 0.7932 -0.4985 0.375 800. 0.4219 0.2913 0.5236 0.4104 0.8 0.523 0.12 -0.5342 0.165 -0.252 -0.165 0.252 18.7106 5.0315 1.06 1.9795 -2.1 -0.5 50 7.1333 3.3407 0.5048 0.0352 0.0452 0. 0.2695 3.1045 1.0008 0.1128 7.2372 0. -0.4536 -0.009913 -0.004732 4.8963 -0.4895 -0.3077 -0.005696 0.253169 0. 300 1.3682 0.7768 -0.5603 0.377 800. 0.4244 0.2949 0.527 0.4143 0.8 0.5278 0.15 -0.5462 0.165 -0.25 -0.165 0.25 16.6246 5.0547 1.06 2.0362 -2.1 -0.5 50 7.3621 3.43 0.5045 0.0352 0.0345 0. 0.2695 3.3399 0.9801 0.1106 7.2109 0. -0.4536 -0.009896 -0.003806 5.0644 -0.5477 -0.3113 -0.005845 0.266468 0. 300 1.3241 0.7437 -0.6451 0.379 800. 0.4275 0.2993 0.5308 0.4191 0.8 0.5304 0.17 -0.5858 0.165 -0.248 -0.165 0.248 15.3709 5.0704 1.06 2.0823 -2.1 -0.5 50 7.4365 3.4688 0.5036 0.0352 0.0283 0. 0.2695 3.4719 0.9652 0.115 7.2491 0. -0.4536 -0.009787 -0.00328 5.1371 -0.5922 -0.3062 -0.005959 0.26506 0. 300 1.3071 0.7219 -0.6981 0.38 800. 0.4292 0.3017 0.5328 0.4217 0.8 0.531 0.2 -0.6798 0.165 -0.2449 -0.165 0.2449 13.7012 5.0939 1.06 2.1521 -2.1 -0.5 50 7.4972 3.5146 0.5016 0.0352 0.0202 0. 0.2695 3.6434 0.9459 0.1208 7.2988 0. -0.444 -0.009505 -0.00269 5.188 -0.6693 -0.2927 -0.006141 0.255253 0. 300 1.2931 0.6922 -0.7653 0.384 800. 0.4313 0.3047 0.5351 0.4252 0.8 0.5312 0.25 -0.8663 0.165 -0.2382 -0.165 0.2382 11.2667 5.1315 1.06 2.2574 -2.1 -0.5 50 7.5416 3.5746 0.4971 0.0352 0.009 0. 0.2695 3.8787 0.9196 0.1208 7.3691 0. -0.3539 -0.008918 -0.002128 5.2164 -0.7766 -0.2662 -0.006439 0.231541 0. 300 1.315 0.6579 -0.8469 0.393 800. 0.4341 0.3087 0.5377 0.4299 0.7999 0.5309 0.3 -1.0514 0.165 -0.2313 -0.165 0.2313 9.1908 5.167 1.06 2.344 -2.1 -0.5 50 7.56 3.6232 0.4919 0.0352 -0.0004 0. 0.2695 4.0711 0.8829 0.1175 6.8789 0. -0.2688 -0.008251 -0.001812 5.1954 -0.8501 -0.2405 -0.006704 0.207277 0.001 300 1.3514 0.6362 -0.8999 0.408 800. 0.4363 0.3119 0.5395 0.4338 0.7997 0.5307 0.4 -1.3794 0.165 -0.2146 -0.165 0.2146 6.5459 5.2317 1.06 2.4709 -2.1 -0.5 50 7.5735 3.6945 0.4807 0.0352 -0.0155 0. 0.2695 4.3745 0.8302 0.106 6.5334 0. -0.1793 -0.007267 -0.001274 5.0899 -0.9431 -0.1975 -0.007125 0.165464 0.004 300 1.4051 0.6049 -0.9618 0.462 800. 0.4396 0.3165 0.5422 0.4399 0.7988 0.531 0.5 -1.6508 0.165 -0.1972 -0.165 0.1972 5.2305 5.2893 1.06 2.5567 -2.1 -0.5 50 7.5778 3.7401 0.4707 0.0352 -0.0278 0.0991 0.2695 4.6099 0.7884 0.1061 6.526 0. -0.1428 -0.006492 -0.001074 4.7854 -1.0044 -0.1633 -0.007435 0.133828 0.01 300 1.4402 0.5507 -0.9945 0.524 800. 0.4419 0.3199 0.5433 0.4446 0.7966 0.5313 0.75 -2.1511 0.165 -0.162 -0.165 0.162 3.7896 5.4109 1.06 2.6812 -2.1 -0.5 50 7.5808 3.7941 0.4575 0.0352 -0.0477 0.1982 0.2695 5.0376 0.6754 0.1 6.5 0. -0.1138 -0.005147 -0.001115 4.3304 -1.0602 -0.1028 -0.00812 0.085153 0.034 300 1.528 0.3582 -1.0225 0.658 800. 0.4459 0.3255 0.5294 0.4533 0.7792 0.5309 1 -2.5365 0.165 -0.14 -0.165 0.14 3.3024 5.5106 1.06 2.7474 -2.1 -0.5 50 7.5814 3.8144 0.4522 0.0352 -0.0559 0.2154 0.2695 5.3411 0.6196 0.1 6.5 0. -0.1062 -0.004277 -0.001197 4.1667 -1.0941 -0.0699 -0.008444 0.058595 0.067 300 1.6523 0.2003 -1.0002 0.78 800. 0.4484 0.3291 0.5105 0.4594 0.7504 0.5302 1.5 -3.0686 0.165 -0.1184 -0.165 0.1184 2.8498 5.6705 1.06 2.8161 -2.1 -0.5 50 7.5817 3.8284 0.4501 0.0352 -0.063 0.2154 0.2695 5.7688 0.5101 0.1 6.5 0. 
-0.102 -0.002979 -0.001675 4.0029 -1.1142 -0.0425 -0.007707 0.031787 0.143 300 1.8872 0.0356 -0.9245 0.96 800. 0.4515 0.3335 0.4783 0.468 0.7136 0.5276 2 -3.4148 0.1645 -0.11 -0.1645 0.11 2.5417 5.7981 1.06 2.8514 -2.1 -0.5 50 7.5818 3.833 0.45 0.0352 -0.0665 0.2154 0.2695 6.0723 0.3917 0.1 6.5 0. -0.1009 -0.002301 -0.002349 3.8949 -1.1154 -0.0302 -0.004792 0.019716 0.203 300 2.1348 0. -0.8626 1.11 800. 0.4534 0.3363 0.4681 0.4681 0.7035 0.5167 3 -3.9013 0.1168 -0.104 -0.1168 0.104 2.1488 5.9983 1.06 2.8875 -2.1 -0.5 50 7.5818 3.8361 0.45 0.016 -0.0516 0.2154 0.2695 6.5 0.1244 0.1 6.5 0. -0.1003 -0.001344 -0.003306 3.7928 -1.1081 -0.0129 -0.001828 0.009643 0.277 300 3.5752 0. -0.7882 1.291 800. 0.4558 0.3398 0.4617 0.4617 0.7006 0.4917 4 -4.2466 0.0732 -0.102 -0.0732 0.102 1.8957 6.1552 1.06 2.9058 -2.1 -0.5 50 7.5818 3.8369 0.45 0.0062 -0.0448 0.2154 0.2695 6.8035 0.0086 0.1 6.5 0. -0.1001 -0.001084 -0.003566 3.7443 -1.0603 -0.0016 -0.001523 0.005379 0.309 300 3.8646 0. -0.7195 1.387 800. 0.4574 0.3419 0.4571 0.4571 0.7001 0.4682 5 -4.5143 0.0484 -0.101 -0.0484 0.101 1.7228 6.2856 1.06 2.9169 -2.1 -0.5 50 7.5818 3.8376 0.45 0.0029 -0.0424 0.2154 0.2695 7.0389 0. 0.1 6.5 0. -0.1001 -0.00101 -0.00364 3.709 -0.9872 0. -0.00144 0.003223 0.321 300 3.7292 0. -0.656 1.433 800. 0.4584 0.3435 0.4535 0.4535 0.7 0.4517 7.5 -5.0009 0.022 -0.101 -0.022 0.101 1.5737 6.5428 1.06 2.932 -2.1 -0.5 50 7.5818 3.838 0.45 0.0007 -0.0348 0.2154 0.2695 7.4666 0. 0.1 6.5 0. -0.1 -0.000964 -0.003686 3.6632 -0.8274 0. -0.001369 0.001134 0.329 300 2.3763 0. -0.5202 1.46 800. 0.4601 0.3459 0.4471 0.4471 0.7 0.4167 10 -5.3461 0.0124 -0.1 -0.0124 0.1 1.5265 6.7415 1.06 2.9396 -2.1 -0.5 50 7.5818 3.838 0.45 0.0003 -0.0253 0.2154 0.2695 7.77 0. 0.1 6.5 0. -0.1 -0.00095 -0.0037 3.623 -0.7053 0. -0.001361 0.000515 0.33 300 1.7679 0. -0.4068 1.464 800. 0.4612 0.3474 0.4426 0.4426 0.7 0.3755 """) class ChiouYoungs2014PEER(ChiouYoungs2014): """ This implements the Chiou & Youngs (2014) GMPE for use with the PEER tests. In this version the total standard deviation is fixed at 0.65 """ #: Only the total standars deviation is defined DEFINED_FOR_STANDARD_DEVIATION_TYPES = set([ const.StdDev.TOTAL, ]) #: The PEER tests requires only PGA DEFINED_FOR_INTENSITY_MEASURE_TYPES = set([ PGA, ]) def _get_stddevs(self, sites, rup, C, stddev_types, ln_y_ref, exp1, exp2): """ Returns the standard deviation, which is fixed at 0.65 for every site """ ret = [] for stddev_type in stddev_types: assert stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES if stddev_type == const.StdDev.TOTAL: # eq. 13 ret.append(0.65 * np.ones_like(sites.vs30)) return ret class ChiouYoungs2014NearFaultEffect(ChiouYoungs2014): """ This implements the Chiou & Youngs (2014) GMPE include the near fault effect prediction. In this version, we add the distance measure, rcdpp for directivity prediction. """ #: Required distance measures are RRup, Rjb, Rx, and Rcdpp REQUIRES_DISTANCES = set(('rrup', 'rjb', 'rx', 'rcdpp')) def _get_centered_cdpp(self, dists): """ Get directivity prediction parameter centered on the avgerage directivity prediction parameter. """ centered_dpp = dists.rcdpp return centered_dpp
gem/oq-hazardlib
openquake/hazardlib/gsim/chiou_youngs_2014.py
Python
agpl-3.0
21,363
[ "Brian" ]
98b09926bf57e9d5a0b46f8162948197590f4b4cf2e158efb14359e954e5b931
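As a worked example of the magnitude dependence in _get_stddevs above (eq. 13), the inter-event term interpolates linearly between tau1 and tau2 over M = 5 to 6.5 and is clipped outside that range. A small sketch using the PGA coefficients from the COEFFS table:

def tau_cy14(mag, tau1=0.4, tau2=0.26):
    """Inter-event standard deviation; defaults are the PGA coefficients above."""
    mag_test = min(max(mag, 5.0), 6.5) - 5.0
    return tau1 + (tau2 - tau1) / 1.5 * mag_test

print(tau_cy14(4.5))    # 0.4   (clipped at M = 5)
print(tau_cy14(5.75))   # ~0.33 (halfway between tau1 and tau2)
print(tau_cy14(7.0))    # ~0.26 (clipped at M = 6.5)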
#!/usr/bin/env python from scipy import interpolate from glob import glob import os import cosmology import numpy import matplotlib.pyplot as plt import matplotlib from scipy.stats import linregress Polygon = matplotlib.patches.Polygon def mag_z(axes): LBCG = 4.0 z = numpy.arange(0.01, 1.5, 0.01) mstar = mi_star_evol(z) mstar_sub = mstar - 2.5 * numpy.log10(0.4) BCG = mstar - 2.5 * numpy.log10(LBCG) axes.plot(z, mstar_sub, 'k-', linewidth=0.5, label='$0.4L_\star$ galaxy') axes.plot(z, mstar, 'k-', linewidth=1.5, label='$L_\star$ galaxy') axes.plot(z, BCG, 'k-', linewidth=2.5, label='$%dL_\star$ (BCG)' % LBCG) axes.set_xlabel('Redshift') axes.legend(loc='lower right', fancybox=True, shadow=True) axes.set_xlim(0.05, 1.5) axes.set_ylim(20, 26) return def calc_completeness_model(fields): ''' Calculates the completeness using a histogram. ''' from astropy.io import ascii bins = numpy.arange(15, 30, 0.5) centers = (bins[:-1] + bins[1:]) / 2 data_dir = '../data/proc2_small/' completeness_hist = [] for f in fields: cat = '{}/{}/{}i_cal.cat'.format(data_dir, f, f) cat = ascii.read(cat) cat = cat.to_pandas() cat = cat.loc[cat.MAG_AUTO < 40] cat = cat.loc[cat.CLASS_STAR < 0.8] # make a bunch of figures n, bins_ = numpy.histogram(cat['MAG_AUTO'], bins=bins) # make it a log plot logn = numpy.log10(n) # find the peak peak = numpy.argmax(logn) # make a model from mag 18.5 - 21.5 model = linregress(centers[peak - 5:peak], logn[peak - 5:peak]) # convert the linear model in lin-log space to log in linear space # and figure out where 80% completeness is # see https://en.wikipedia.org/wiki/Semi-log_plot y = n / (10**model.intercept * 10**(centers * model.slope)) x = centers # plot(y, x) to see how the ratio curve goes. func = interpolate.interp1d(x, y) # the interpolate wasn't doing very well... # when just asked what is 80% mags = numpy.arange(centers[0], centers[-1], 0.1) magdiff = 0.8 - func(mags) # find the last bin where the difference is negative # this is the bin, with the highest magnitude, where we go from having # more observed objects to more objects in the model. 
mag_idx = numpy.where(magdiff < 0)[0][-1] completeness_hist.append(mags[mag_idx]) print(f, '{:.3f}'.format(mags[mag_idx])) return completeness_hist def mag_lim_hist(axes): Ngal_o = 100 m1 = 20.0 m2 = 25.0 dm = 0.2 Niter = 10 filter = 'i' path = '../data/sims/Catalogs_Gal_small/' # find all of the fields we have hunted imgs = glob('../cluster_search/round2/PSZ*/**/*A.png', recursive=True) fields = [i.split('/')[-2] for i in imgs] mag = numpy.arange(m1, m2, dm) completeness = [] for f in fields: frac = numpy.zeros_like(mag) Ngal = Ngal_o * Niter for i, m in enumerate(mag): cmd = "cat %s/%s_m%.2f_%s_%s.dat | wc" % (path, f, mag[i], filter, '*') # Do simple cat + wc and redirect and split stdout Nobs = os.popen(cmd).readline().split()[0] frac[i] = float(Nobs) / Ngal # figure out the 80% completeness func = interpolate.interp1d(frac, mag) try: completeness.append(func(0.8)) except ValueError: completeness.append(mag[numpy.argmax(frac)]) axes.hist(completeness, bins=mag, color='#348abd', orientation='horizontal') #axes.hist(completeness_low, bins=mag, color='#348abd') #axes.set_ylabel('$N_{fields}$') axes.set_xlabel('$N_{fields}$') return axes def mag_lim_hist_model(axes): m1 = 20.0 m2 = 25.0 dm = 0.2 mag = numpy.arange(m1, m2, dm) # find all of the fields we have hunted imgs = glob('../cluster_search/round2/PSZ*/**/*A.png', recursive=True) fields = [i.split('/')[-2] for i in imgs] completeness = calc_completeness_model(fields) axes.hist(completeness, bins=mag, color='#348abd', orientation='horizontal', histtype='stepfilled') # flip the axis axes.invert_xaxis() axes.set_ylim(20, 26) axes.set_xlabel('$N_{fields}$') axes.set_ylabel('Limiting i Magnitude') return axes, fields, completeness # observed mi_star as a function of redshift def mi_star_evol(z, h=0.7, cosmo=(0.3, 0.7, 0.7)): # Blanton's number i.e. 
M* - 1.5 mags BCSPIPE = '/home/boada/Projects/planckClusters/MOSAICpipe' evolfile = "1_0gyr_hr_m62_salp.color" evolfile = os.path.join(BCSPIPE, "LIB/evol", evolfile) k, ev, c = KEfit(evolfile) dlum = cosmology.dl(z, cosmology=cosmo) # Blanton M* Mi_star = -21.22 - 5 * numpy.log10(h) # + self.evf['i'](z)[0] dlum = cosmology.dl(z, cosmology=cosmo) DM = 25.0 + 5.0 * numpy.log10(dlum) mx = Mi_star + DM + k['i'](z) + ev['i'](z) - ev['i'](0.1) return mx ################################################################## # Read both kcorrection k(z) and evolution ev(z) from BC03 model ################################################################## def KEfit(modelfile): import scipy import scipy.interpolate import tableio print("# Getting K(z) and Ev(z) corrections from file: %s\n" % modelfile) e = {} k = {} c = {} (z, c_gr, c_ri, c_iz, k_g, k_r, k_i, k_z, e_g, e_r, e_i, e_z) = tableio.get_data( modelfile, cols=(0, 3, 4, 5, 10, 11, 12, 13, 14, 15, 16, 17)) # K-only correction at each age SED, k['g'] = scipy.interpolate.interp1d(z, k_g) k['r'] = scipy.interpolate.interp1d(z, k_r) k['i'] = scipy.interpolate.interp1d(z, k_i) k['z'] = scipy.interpolate.interp1d(z, k_z) # Evolution term alone e['g'] = scipy.interpolate.interp1d(z, e_g) e['r'] = scipy.interpolate.interp1d(z, e_r) e['i'] = scipy.interpolate.interp1d(z, e_i) e['z'] = scipy.interpolate.interp1d(z, e_z) # Color redshift c['gr'] = scipy.interpolate.interp1d(z, c_gr) c['ri'] = scipy.interpolate.interp1d(z, c_ri) c['iz'] = scipy.interpolate.interp1d(z, c_iz) return k, e, c def add_z_cl(ax, fields, completeness): # fix the path to get the results import sys sys.path.append('../results/') from get_results import loadClusters # confirmed clusters high_conf = ['PSZ1_G206.45+13.89', 'PSZ1_G224.82+13.62', 'PSZ2_G029.66-47.63', 'PSZ2_G043.44-41.27', 'PSZ2_G096.43-20.89', 'PSZ2_G120.76+44.14', 'PSZ2_G125.55+32.72', 'PSZ2_G137.24+53.93', 'PSZ2_G305.76+44.79', 'PSZ2_G107.83-45.45', 'PSZ2_G098.38+77.22', 'PSZ1_G084.62-15.86', 'PSZ2_G106.11+24.11', 'PSZ2_G173.76+22.92', 'PSZ2_G191.82-26.64'] # get the density for the confirmed fields. depth = [completeness[fields.index(hc)] for hc in numpy.sort(high_conf)] # the confirmed = True gets the 15 confirmed clusters results = loadClusters(round=3, confirmed=True) # sort the results results.sort_values('Cluster', inplace=True) ax.scatter(results['z_cl_boada'], depth, s=150, marker='*', color='#e24a33') return ax if __name__ == "__main__": f = plt.figure(figsize=(7, 7 * (numpy.sqrt(5.) - 1.0) / 2.0)) ax = plt.subplot2grid((1, 4), (0, 0), colspan=2) axs = plt.subplot2grid((1, 4), (0, 2), colspan=2) ax, fields, completeness = mag_lim_hist_model(ax) mag_z(axs) add_z_cl(axs, fields, completeness) plt.tight_layout() plt.show()
boada/planckClusters
sims/plot_mag_z_relation.py
Python
mit
7,866
[ "Galaxy" ]
8c19d27b2e66e0f762769b451492d68555b25624b77d0b073196aa08093f5148
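The 80% completeness magnitude in mag_lim_hist above is read off an interpolation of recovered fraction versus input magnitude. A compact sketch with a made-up recovery curve (the logistic shape and its parameters are illustrative, not the survey's actual completeness):

import numpy as np
from scipy import interpolate

mag = np.arange(20.0, 25.0, 0.2)
frac = 1.0 / (1.0 + np.exp(2.0 * (mag - 23.0)))   # toy recovered fraction per magnitude bin
func = interpolate.interp1d(frac, mag)            # invert: fraction -> magnitude
mag_80 = float(func(0.8))                         # depth at which 80% of inputs are recovered
print(round(mag_80, 2))                           # ~22.3 for this toy curve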
# Copyright 2002 Gary Strangman. All rights reserved # Copyright 2002-2016 The SciPy Developers # # The original code from Gary Strangman was heavily adapted for # use in SciPy by Travis Oliphant. The original code came with the # following disclaimer: # # This software is provided "as-is". There are no expressed or implied # warranties of any kind, including, but not limited to, the warranties # of merchantability and fitness for a given application. In no event # shall Gary Strangman be liable for any direct, indirect, incidental, # special, exemplary or consequential damages (including, but not limited # to, loss of use, data or profits, or business interruption) however # caused and on any theory of liability, whether in contract, strict # liability or tort (including negligence or otherwise) arising in any way # out of the use of this software, even if advised of the possibility of # such damage. """ A collection of basic statistical functions for Python. The function names appear below. Some scalar functions defined here are also available in the scipy.special package where they work on arbitrary sized arrays. Disclaimers: The function list is obviously incomplete and, worse, the functions are not optimized. All functions have been tested (some more so than others), but they are far from bulletproof. Thus, as with any free software, no warranty or guarantee is expressed or implied. :-) A few extra functions that don't appear in the list below can be found by interested treasure-hunters. These functions don't necessarily have both list and array versions but were deemed useful. Central Tendency ---------------- .. autosummary:: :toctree: generated/ gmean hmean mode Moments ------- .. autosummary:: :toctree: generated/ moment variation skew kurtosis normaltest Altered Versions ---------------- .. autosummary:: :toctree: generated/ tmean tvar tstd tsem describe Frequency Stats --------------- .. autosummary:: :toctree: generated/ itemfreq scoreatpercentile percentileofscore cumfreq relfreq Variability ----------- .. autosummary:: :toctree: generated/ obrientransform sem zmap zscore gstd iqr median_absolute_deviation Trimming Functions ------------------ .. autosummary:: :toctree: generated/ trimboth trim1 Correlation Functions --------------------- .. autosummary:: :toctree: generated/ pearsonr fisher_exact spearmanr pointbiserialr kendalltau weightedtau linregress theilslopes Inferential Stats ----------------- .. autosummary:: :toctree: generated/ ttest_1samp ttest_ind ttest_ind_from_stats ttest_rel chisquare power_divergence ks_2samp epps_singleton_2samp mannwhitneyu ranksums wilcoxon kruskal friedmanchisquare brunnermunzel combine_pvalues Statistical Distances --------------------- .. autosummary:: :toctree: generated/ wasserstein_distance energy_distance ANOVA Functions --------------- .. autosummary:: :toctree: generated/ f_oneway Support Functions ----------------- .. autosummary:: :toctree: generated/ rankdata rvs_ratio_uniforms References ---------- .. [CRCProbStat2000] Zwillinger, D. and Kokoska, S. (2000). CRC Standard Probability and Statistics Tables and Formulae. Chapman & Hall: New York. 2000. 
""" from __future__ import division, print_function, absolute_import import warnings import sys import math if sys.version_info >= (3, 5): from math import gcd else: from fractions import gcd from collections import namedtuple import numpy as np from numpy import array, asarray, ma from scipy._lib.six import callable, string_types from scipy._lib._version import NumpyVersion from scipy._lib._util import _lazywhere import scipy.special as special from scipy import linalg from . import distributions from . import mstats_basic from ._stats_mstats_common import (_find_repeats, linregress, theilslopes, siegelslopes) from ._stats import _kendall_dis, _toint64, _weightedrankedtau from ._rvs_sampling import rvs_ratio_uniforms from ._hypotests import epps_singleton_2samp __all__ = ['find_repeats', 'gmean', 'hmean', 'mode', 'tmean', 'tvar', 'tmin', 'tmax', 'tstd', 'tsem', 'moment', 'variation', 'skew', 'kurtosis', 'describe', 'skewtest', 'kurtosistest', 'normaltest', 'jarque_bera', 'itemfreq', 'scoreatpercentile', 'percentileofscore', 'cumfreq', 'relfreq', 'obrientransform', 'sem', 'zmap', 'zscore', 'iqr', 'gstd', 'median_absolute_deviation', 'sigmaclip', 'trimboth', 'trim1', 'trim_mean', 'f_oneway', 'PearsonRConstantInputWarning', 'PearsonRNearConstantInputWarning', 'pearsonr', 'fisher_exact', 'spearmanr', 'pointbiserialr', 'kendalltau', 'weightedtau', 'linregress', 'siegelslopes', 'theilslopes', 'ttest_1samp', 'ttest_ind', 'ttest_ind_from_stats', 'ttest_rel', 'kstest', 'chisquare', 'power_divergence', 'ks_2samp', 'mannwhitneyu', 'tiecorrect', 'ranksums', 'kruskal', 'friedmanchisquare', 'rankdata', 'rvs_ratio_uniforms', 'combine_pvalues', 'wasserstein_distance', 'energy_distance', 'brunnermunzel', 'epps_singleton_2samp'] def _chk_asarray(a, axis): if axis is None: a = np.ravel(a) outaxis = 0 else: a = np.asarray(a) outaxis = axis if a.ndim == 0: a = np.atleast_1d(a) return a, outaxis def _chk2_asarray(a, b, axis): if axis is None: a = np.ravel(a) b = np.ravel(b) outaxis = 0 else: a = np.asarray(a) b = np.asarray(b) outaxis = axis if a.ndim == 0: a = np.atleast_1d(a) if b.ndim == 0: b = np.atleast_1d(b) return a, b, outaxis def _contains_nan(a, nan_policy='propagate'): policies = ['propagate', 'raise', 'omit'] if nan_policy not in policies: raise ValueError("nan_policy must be one of {%s}" % ', '.join("'%s'" % s for s in policies)) try: # Calling np.sum to avoid creating a huge array into memory # e.g. np.isnan(a).any() with np.errstate(invalid='ignore'): contains_nan = np.isnan(np.sum(a)) except TypeError: # This can happen when attempting to sum things which are not # numbers (e.g. as in the function `mode`). Try an alternative method: try: contains_nan = np.nan in set(a.ravel()) except TypeError: # Don't know what to do. Fall back to omitting nan values and # issue a warning. contains_nan = False nan_policy = 'omit' warnings.warn("The input array could not be properly checked for nan " "values. nan values will be ignored.", RuntimeWarning) if contains_nan and nan_policy == 'raise': raise ValueError("The input contains nan values") return (contains_nan, nan_policy) def gmean(a, axis=0, dtype=None): """ Compute the geometric mean along the specified axis. Return the geometric average of the array elements. That is: n-th root of (x1 * x2 * ... * xn) Parameters ---------- a : array_like Input array or object that can be converted to an array. axis : int or None, optional Axis along which the geometric mean is computed. Default is 0. If None, compute over the whole array `a`. 
dtype : dtype, optional Type of the returned array and of the accumulator in which the elements are summed. If dtype is not specified, it defaults to the dtype of a, unless a has an integer dtype with a precision less than that of the default platform integer. In that case, the default platform integer is used. Returns ------- gmean : ndarray see dtype parameter above See Also -------- numpy.mean : Arithmetic average numpy.average : Weighted average hmean : Harmonic mean Notes ----- The geometric average is computed over a single dimension of the input array, axis=0 by default, or all values in the array if axis=None. float64 intermediate and return values are used for integer inputs. Use masked arrays to ignore any non-finite values in the input or that arise in the calculations such as Not a Number and infinity because masked arrays automatically mask any non-finite values. Examples -------- >>> from scipy.stats import gmean >>> gmean([1, 4]) 2.0 >>> gmean([1, 2, 3, 4, 5, 6, 7]) 3.3800151591412964 """ if not isinstance(a, np.ndarray): # if not an ndarray object attempt to convert it log_a = np.log(np.array(a, dtype=dtype)) elif dtype: # Must change the default dtype allowing array type if isinstance(a, np.ma.MaskedArray): log_a = np.log(np.ma.asarray(a, dtype=dtype)) else: log_a = np.log(np.asarray(a, dtype=dtype)) else: log_a = np.log(a) return np.exp(log_a.mean(axis=axis)) def hmean(a, axis=0, dtype=None): """ Calculate the harmonic mean along the specified axis. That is: n / (1/x1 + 1/x2 + ... + 1/xn) Parameters ---------- a : array_like Input array, masked array or object that can be converted to an array. axis : int or None, optional Axis along which the harmonic mean is computed. Default is 0. If None, compute over the whole array `a`. dtype : dtype, optional Type of the returned array and of the accumulator in which the elements are summed. If `dtype` is not specified, it defaults to the dtype of `a`, unless `a` has an integer `dtype` with a precision less than that of the default platform integer. In that case, the default platform integer is used. Returns ------- hmean : ndarray see `dtype` parameter above See Also -------- numpy.mean : Arithmetic average numpy.average : Weighted average gmean : Geometric mean Notes ----- The harmonic mean is computed over a single dimension of the input array, axis=0 by default, or all values in the array if axis=None. float64 intermediate and return values are used for integer inputs. Use masked arrays to ignore any non-finite values in the input or that arise in the calculations such as Not a Number and infinity. Examples -------- >>> from scipy.stats import hmean >>> hmean([1, 4]) 1.6000000000000001 >>> hmean([1, 2, 3, 4, 5, 6, 7]) 2.6997245179063363 """ if not isinstance(a, np.ndarray): a = np.array(a, dtype=dtype) if np.all(a > 0): # Harmonic mean only defined if greater than zero if isinstance(a, np.ma.MaskedArray): size = a.count(axis) else: if axis is None: a = a.ravel() size = a.shape[0] else: size = a.shape[axis] return size / np.sum(1.0 / a, axis=axis, dtype=dtype) else: raise ValueError("Harmonic mean only defined if all elements greater " "than zero") ModeResult = namedtuple('ModeResult', ('mode', 'count')) def mode(a, axis=0, nan_policy='propagate'): """ Return an array of the modal (most common) value in the passed array. If there is more than one such value, only the smallest is returned. The bin-count for the modal bins is also returned. Parameters ---------- a : array_like n-dimensional array of which to find mode(s). 
axis : int or None, optional Axis along which to operate. Default is 0. If None, compute over the whole array `a`. nan_policy : {'propagate', 'raise', 'omit'}, optional Defines how to handle when input contains nan. 'propagate' returns nan, 'raise' throws an error, 'omit' performs the calculations ignoring nan values. Default is 'propagate'. Returns ------- mode : ndarray Array of modal values. count : ndarray Array of counts for each mode. Examples -------- >>> a = np.array([[6, 8, 3, 0], ... [3, 2, 1, 7], ... [8, 1, 8, 4], ... [5, 3, 0, 5], ... [4, 7, 5, 9]]) >>> from scipy import stats >>> stats.mode(a) (array([[3, 1, 0, 0]]), array([[1, 1, 1, 1]])) To get mode of whole array, specify ``axis=None``: >>> stats.mode(a, axis=None) (array([3]), array([3])) """ a, axis = _chk_asarray(a, axis) if a.size == 0: return ModeResult(np.array([]), np.array([])) contains_nan, nan_policy = _contains_nan(a, nan_policy) if contains_nan and nan_policy == 'omit': a = ma.masked_invalid(a) return mstats_basic.mode(a, axis) if a.dtype == object and np.nan in set(a.ravel()): # Fall back to a slower method since np.unique does not work with NaN scores = set(np.ravel(a)) # get ALL unique values testshape = list(a.shape) testshape[axis] = 1 oldmostfreq = np.zeros(testshape, dtype=a.dtype) oldcounts = np.zeros(testshape, dtype=int) for score in scores: template = (a == score) counts = np.expand_dims(np.sum(template, axis), axis) mostfrequent = np.where(counts > oldcounts, score, oldmostfreq) oldcounts = np.maximum(counts, oldcounts) oldmostfreq = mostfrequent return ModeResult(mostfrequent, oldcounts) def _mode1D(a): vals, cnts = np.unique(a, return_counts=True) return vals[cnts.argmax()], cnts.max() # np.apply_along_axis will convert the _mode1D tuples to a numpy array, casting types in the process # This recreates the results without that issue # View of a, rotated so the requested axis is last in_dims = list(range(a.ndim)) a_view = np.transpose(a, in_dims[:axis] + in_dims[axis+1:] + [axis]) inds = np.ndindex(a_view.shape[:-1]) modes = np.empty(a_view.shape[:-1], dtype=a.dtype) counts = np.zeros(a_view.shape[:-1], dtype=np.int) for ind in inds: modes[ind], counts[ind] = _mode1D(a_view[ind]) newshape = list(a.shape) newshape[axis] = 1 return ModeResult(modes.reshape(newshape), counts.reshape(newshape)) def _mask_to_limits(a, limits, inclusive): """Mask an array for values outside of given limits. This is primarily a utility function. Parameters ---------- a : array limits : (float or None, float or None) A tuple consisting of the (lower limit, upper limit). Values in the input array less than the lower limit or greater than the upper limit will be masked out. None implies no limit. inclusive : (bool, bool) A tuple consisting of the (lower flag, upper flag). These flags determine whether values exactly equal to lower or upper are allowed. Returns ------- A MaskedArray. Raises ------ A ValueError if there are no values within the given limits. """ lower_limit, upper_limit = limits lower_include, upper_include = inclusive am = ma.MaskedArray(a) if lower_limit is not None: if lower_include: am = ma.masked_less(am, lower_limit) else: am = ma.masked_less_equal(am, lower_limit) if upper_limit is not None: if upper_include: am = ma.masked_greater(am, upper_limit) else: am = ma.masked_greater_equal(am, upper_limit) if am.count() == 0: raise ValueError("No array values within given limits") return am def tmean(a, limits=None, inclusive=(True, True), axis=None): """ Compute the trimmed mean. 
This function finds the arithmetic mean of given values, ignoring values outside the given `limits`. Parameters ---------- a : array_like Array of values. limits : None or (lower limit, upper limit), optional Values in the input array less than the lower limit or greater than the upper limit will be ignored. When limits is None (default), then all values are used. Either of the limit values in the tuple can also be None representing a half-open interval. inclusive : (bool, bool), optional A tuple consisting of the (lower flag, upper flag). These flags determine whether values exactly equal to the lower or upper limits are included. The default value is (True, True). axis : int or None, optional Axis along which to operate. Default is None. Returns ------- tmean : float See also -------- trim_mean : returns mean after trimming a proportion from both tails. Examples -------- >>> from scipy import stats >>> x = np.arange(20) >>> stats.tmean(x) 9.5 >>> stats.tmean(x, (3,17)) 10.0 """ a = asarray(a) if limits is None: return np.mean(a, None) am = _mask_to_limits(a.ravel(), limits, inclusive) return am.mean(axis=axis) def tvar(a, limits=None, inclusive=(True, True), axis=0, ddof=1): """ Compute the trimmed variance. This function computes the sample variance of an array of values, while ignoring values which are outside of given `limits`. Parameters ---------- a : array_like Array of values. limits : None or (lower limit, upper limit), optional Values in the input array less than the lower limit or greater than the upper limit will be ignored. When limits is None, then all values are used. Either of the limit values in the tuple can also be None representing a half-open interval. The default value is None. inclusive : (bool, bool), optional A tuple consisting of the (lower flag, upper flag). These flags determine whether values exactly equal to the lower or upper limits are included. The default value is (True, True). axis : int or None, optional Axis along which to operate. Default is 0. If None, compute over the whole array `a`. ddof : int, optional Delta degrees of freedom. Default is 1. Returns ------- tvar : float Trimmed variance. Notes ----- `tvar` computes the unbiased sample variance, i.e. it uses a correction factor ``n / (n - 1)``. Examples -------- >>> from scipy import stats >>> x = np.arange(20) >>> stats.tvar(x) 35.0 >>> stats.tvar(x, (3,17)) 20.0 """ a = asarray(a) a = a.astype(float) if limits is None: return a.var(ddof=ddof, axis=axis) am = _mask_to_limits(a, limits, inclusive) amnan = am.filled(fill_value=np.nan) return np.nanvar(amnan, ddof=ddof, axis=axis) def tmin(a, lowerlimit=None, axis=0, inclusive=True, nan_policy='propagate'): """ Compute the trimmed minimum. This function finds the minimum value of an array `a` along the specified axis, but only considering values greater than a specified lower limit. Parameters ---------- a : array_like array of values lowerlimit : None or float, optional Values in the input array less than the given limit will be ignored. When lowerlimit is None, then all values are used. The default value is None. axis : int or None, optional Axis along which to operate. Default is 0. If None, compute over the whole array `a`. inclusive : {True, False}, optional This flag determines whether values exactly equal to the lower limit are included. The default value is True. nan_policy : {'propagate', 'raise', 'omit'}, optional Defines how to handle when input contains nan. 
'propagate' returns nan, 'raise' throws an error, 'omit' performs the calculations ignoring nan values. Default is 'propagate'. Returns ------- tmin : float, int or ndarray Examples -------- >>> from scipy import stats >>> x = np.arange(20) >>> stats.tmin(x) 0 >>> stats.tmin(x, 13) 13 >>> stats.tmin(x, 13, inclusive=False) 14 """ a, axis = _chk_asarray(a, axis) am = _mask_to_limits(a, (lowerlimit, None), (inclusive, False)) contains_nan, nan_policy = _contains_nan(am, nan_policy) if contains_nan and nan_policy == 'omit': am = ma.masked_invalid(am) res = ma.minimum.reduce(am, axis).data if res.ndim == 0: return res[()] return res def tmax(a, upperlimit=None, axis=0, inclusive=True, nan_policy='propagate'): """ Compute the trimmed maximum. This function computes the maximum value of an array along a given axis, while ignoring values larger than a specified upper limit. Parameters ---------- a : array_like array of values upperlimit : None or float, optional Values in the input array greater than the given limit will be ignored. When upperlimit is None, then all values are used. The default value is None. axis : int or None, optional Axis along which to operate. Default is 0. If None, compute over the whole array `a`. inclusive : {True, False}, optional This flag determines whether values exactly equal to the upper limit are included. The default value is True. nan_policy : {'propagate', 'raise', 'omit'}, optional Defines how to handle when input contains nan. 'propagate' returns nan, 'raise' throws an error, 'omit' performs the calculations ignoring nan values. Default is 'propagate'. Returns ------- tmax : float, int or ndarray Examples -------- >>> from scipy import stats >>> x = np.arange(20) >>> stats.tmax(x) 19 >>> stats.tmax(x, 13) 13 >>> stats.tmax(x, 13, inclusive=False) 12 """ a, axis = _chk_asarray(a, axis) am = _mask_to_limits(a, (None, upperlimit), (False, inclusive)) contains_nan, nan_policy = _contains_nan(am, nan_policy) if contains_nan and nan_policy == 'omit': am = ma.masked_invalid(am) res = ma.maximum.reduce(am, axis).data if res.ndim == 0: return res[()] return res def tstd(a, limits=None, inclusive=(True, True), axis=0, ddof=1): """ Compute the trimmed sample standard deviation. This function finds the sample standard deviation of given values, ignoring values outside the given `limits`. Parameters ---------- a : array_like array of values limits : None or (lower limit, upper limit), optional Values in the input array less than the lower limit or greater than the upper limit will be ignored. When limits is None, then all values are used. Either of the limit values in the tuple can also be None representing a half-open interval. The default value is None. inclusive : (bool, bool), optional A tuple consisting of the (lower flag, upper flag). These flags determine whether values exactly equal to the lower or upper limits are included. The default value is (True, True). axis : int or None, optional Axis along which to operate. Default is 0. If None, compute over the whole array `a`. ddof : int, optional Delta degrees of freedom. Default is 1. Returns ------- tstd : float Notes ----- `tstd` computes the unbiased sample standard deviation, i.e. it uses a correction factor ``n / (n - 1)``. 
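In terms of the implementation, `tstd` simply returns ``np.sqrt(tvar(a, limits, inclusive, axis, ddof))``, so the handling of `limits` and `inclusive` is exactly that of `tvar`.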
Examples -------- >>> from scipy import stats >>> x = np.arange(20) >>> stats.tstd(x) 5.9160797830996161 >>> stats.tstd(x, (3,17)) 4.4721359549995796 """ return np.sqrt(tvar(a, limits, inclusive, axis, ddof)) def tsem(a, limits=None, inclusive=(True, True), axis=0, ddof=1): """ Compute the trimmed standard error of the mean. This function finds the standard error of the mean for given values, ignoring values outside the given `limits`. Parameters ---------- a : array_like array of values limits : None or (lower limit, upper limit), optional Values in the input array less than the lower limit or greater than the upper limit will be ignored. When limits is None, then all values are used. Either of the limit values in the tuple can also be None representing a half-open interval. The default value is None. inclusive : (bool, bool), optional A tuple consisting of the (lower flag, upper flag). These flags determine whether values exactly equal to the lower or upper limits are included. The default value is (True, True). axis : int or None, optional Axis along which to operate. Default is 0. If None, compute over the whole array `a`. ddof : int, optional Delta degrees of freedom. Default is 1. Returns ------- tsem : float Notes ----- `tsem` uses unbiased sample standard deviation, i.e. it uses a correction factor ``n / (n - 1)``. Examples -------- >>> from scipy import stats >>> x = np.arange(20) >>> stats.tsem(x) 1.3228756555322954 >>> stats.tsem(x, (3,17)) 1.1547005383792515 """ a = np.asarray(a).ravel() if limits is None: return a.std(ddof=ddof) / np.sqrt(a.size) am = _mask_to_limits(a, limits, inclusive) sd = np.sqrt(np.ma.var(am, ddof=ddof, axis=axis)) return sd / np.sqrt(am.count()) ##################################### # MOMENTS # ##################################### def moment(a, moment=1, axis=0, nan_policy='propagate'): r""" Calculate the nth moment about the mean for a sample. A moment is a specific quantitative measure of the shape of a set of points. It is often used to calculate coefficients of skewness and kurtosis due to its close relationship with them. Parameters ---------- a : array_like data moment : int or array_like of ints, optional order of central moment that is returned. Default is 1. axis : int or None, optional Axis along which the central moment is computed. Default is 0. If None, compute over the whole array `a`. nan_policy : {'propagate', 'raise', 'omit'}, optional Defines how to handle when input contains nan. 'propagate' returns nan, 'raise' throws an error, 'omit' performs the calculations ignoring nan values. Default is 'propagate'. Returns ------- n-th central moment : ndarray or float The appropriate moment along the given axis or over all values if axis is None. The denominator for the moment calculation is the number of observations, no degrees of freedom correction is done. See also -------- kurtosis, skew, describe Notes ----- The k-th central moment of a data sample is: .. math:: m_k = \frac{1}{n} \sum_{i = 1}^n (x_i - \bar{x})^k Where n is the number of samples and x-bar is the mean. This function uses exponentiation by squares [1]_ for efficiency. References ---------- .. 
[1] https://eli.thegreenplace.net/2009/03/21/efficient-integer-exponentiation-algorithms Examples -------- >>> from scipy.stats import moment >>> moment([1, 2, 3, 4, 5], moment=1) 0.0 >>> moment([1, 2, 3, 4, 5], moment=2) 2.0 """ a, axis = _chk_asarray(a, axis) contains_nan, nan_policy = _contains_nan(a, nan_policy) if contains_nan and nan_policy == 'omit': a = ma.masked_invalid(a) return mstats_basic.moment(a, moment, axis) if a.size == 0: # empty array, return nan(s) with shape matching `moment` if np.isscalar(moment): return np.nan else: return np.full(np.asarray(moment).shape, np.nan, dtype=np.float64) # for array_like moment input, return a value for each. if not np.isscalar(moment): mmnt = [_moment(a, i, axis) for i in moment] return np.array(mmnt) else: return _moment(a, moment, axis) def _moment(a, moment, axis): if np.abs(moment - np.round(moment)) > 0: raise ValueError("All moment parameters must be integers") if moment == 0: # When moment equals 0, the result is 1, by definition. shape = list(a.shape) del shape[axis] if shape: # return an actual array of the appropriate shape return np.ones(shape, dtype=float) else: # the input was 1D, so return a scalar instead of a rank-0 array return 1.0 elif moment == 1: # By definition the first moment about the mean is 0. shape = list(a.shape) del shape[axis] if shape: # return an actual array of the appropriate shape return np.zeros(shape, dtype=float) else: # the input was 1D, so return a scalar instead of a rank-0 array return np.float64(0.0) else: # Exponentiation by squares: form exponent sequence n_list = [moment] current_n = moment while current_n > 2: if current_n % 2: current_n = (current_n - 1) / 2 else: current_n /= 2 n_list.append(current_n) # Starting point for exponentiation by squares a_zero_mean = a - np.expand_dims(np.mean(a, axis), axis) if n_list[-1] == 1: s = a_zero_mean.copy() else: s = a_zero_mean**2 # Perform multiplications for n in n_list[-2::-1]: s = s**2 if n % 2: s *= a_zero_mean return np.mean(s, axis) def variation(a, axis=0, nan_policy='propagate'): """ Compute the coefficient of variation, the ratio of the biased standard deviation to the mean. Parameters ---------- a : array_like Input array. axis : int or None, optional Axis along which to calculate the coefficient of variation. Default is 0. If None, compute over the whole array `a`. nan_policy : {'propagate', 'raise', 'omit'}, optional Defines how to handle when input contains nan. 'propagate' returns nan, 'raise' throws an error, 'omit' performs the calculations ignoring nan values. Default is 'propagate'. Returns ------- variation : ndarray The calculated variation along the requested axis. References ---------- .. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard Probability and Statistics Tables and Formulae. Chapman & Hall: New York. 2000. Examples -------- >>> from scipy.stats import variation >>> variation([1, 2, 3, 4, 5]) 0.47140452079103173 """ a, axis = _chk_asarray(a, axis) contains_nan, nan_policy = _contains_nan(a, nan_policy) if contains_nan and nan_policy == 'omit': a = ma.masked_invalid(a) return mstats_basic.variation(a, axis) return a.std(axis) / a.mean(axis) def skew(a, axis=0, bias=True, nan_policy='propagate'): """ Compute the sample skewness of a data set. For normally distributed data, the skewness should be about 0. For unimodal continuous distributions, a skewness value > 0 means that there is more weight in the right tail of the distribution. 
The function `skewtest` can be used to determine if the skewness value is close enough to 0, statistically speaking. Parameters ---------- a : ndarray data axis : int or None, optional Axis along which skewness is calculated. Default is 0. If None, compute over the whole array `a`. bias : bool, optional If False, then the calculations are corrected for statistical bias. nan_policy : {'propagate', 'raise', 'omit'}, optional Defines how to handle when input contains nan. 'propagate' returns nan, 'raise' throws an error, 'omit' performs the calculations ignoring nan values. Default is 'propagate'. Returns ------- skewness : ndarray The skewness of values along an axis, returning 0 where all values are equal. Notes ----- The sample skewness is computed as the Fisher-Pearson coefficient of skewness, i.e. .. math:: g_1=\\frac{m_3}{m_2^{3/2}} where .. math:: m_i=\\frac{1}{N}\\sum_{n=1}^N(x[n]-\\bar{x})^i is the biased sample :math:`i\\texttt{th}` central moment, and :math:`\\bar{x}` is the sample mean. If ``bias`` is False, the calculations are corrected for bias and the value computed is the adjusted Fisher-Pearson standardized moment coefficient, i.e. .. math:: G_1=\\frac{k_3}{k_2^{3/2}}= \\frac{\\sqrt{N(N-1)}}{N-2}\\frac{m_3}{m_2^{3/2}}. References ---------- .. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard Probability and Statistics Tables and Formulae. Chapman & Hall: New York. 2000. Section 2.2.24.1 Examples -------- >>> from scipy.stats import skew >>> skew([1, 2, 3, 4, 5]) 0.0 >>> skew([2, 8, 0, 4, 1, 9, 9, 0]) 0.2650554122698573 """ a, axis = _chk_asarray(a, axis) n = a.shape[axis] contains_nan, nan_policy = _contains_nan(a, nan_policy) if contains_nan and nan_policy == 'omit': a = ma.masked_invalid(a) return mstats_basic.skew(a, axis, bias) m2 = moment(a, 2, axis) m3 = moment(a, 3, axis) zero = (m2 == 0) vals = _lazywhere(~zero, (m2, m3), lambda m2, m3: m3 / m2**1.5, 0.) if not bias: can_correct = (n > 2) & (m2 > 0) if can_correct.any(): m2 = np.extract(can_correct, m2) m3 = np.extract(can_correct, m3) nval = np.sqrt((n - 1.0) * n) / (n - 2.0) * m3 / m2**1.5 np.place(vals, can_correct, nval) if vals.ndim == 0: return vals.item() return vals def kurtosis(a, axis=0, fisher=True, bias=True, nan_policy='propagate'): """ Compute the kurtosis (Fisher or Pearson) of a dataset. Kurtosis is the fourth central moment divided by the square of the variance. If Fisher's definition is used, then 3.0 is subtracted from the result to give 0.0 for a normal distribution. If bias is False then the kurtosis is calculated using k statistics to eliminate bias coming from biased moment estimators Use `kurtosistest` to see if result is close enough to normal. Parameters ---------- a : array data for which the kurtosis is calculated axis : int or None, optional Axis along which the kurtosis is calculated. Default is 0. If None, compute over the whole array `a`. fisher : bool, optional If True, Fisher's definition is used (normal ==> 0.0). If False, Pearson's definition is used (normal ==> 3.0). bias : bool, optional If False, then the calculations are corrected for statistical bias. nan_policy : {'propagate', 'raise', 'omit'}, optional Defines how to handle when input contains nan. 'propagate' returns nan, 'raise' throws an error, 'omit' performs the calculations ignoring nan values. Default is 'propagate'. Returns ------- kurtosis : array The kurtosis of values along an axis. If all values are equal, return -3 for Fisher's definition and 0 for Pearson's definition. References ---------- .. 
[1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard Probability and Statistics Tables and Formulae. Chapman & Hall: New York. 2000. Examples -------- In Fisher's definition, the kurtosis of the normal distribution is zero. In the following example, the kurtosis is close to zero, because it was calculated from the dataset, not from the continuous distribution. >>> from scipy.stats import norm, kurtosis >>> data = norm.rvs(size=1000, random_state=3) >>> kurtosis(data) -0.06928694200380558 The distribution with a higher kurtosis has a heavier tail. The zero valued kurtosis of the normal distribution in Fisher's definition can serve as a reference point. >>> import matplotlib.pyplot as plt >>> import scipy.stats as stats >>> from scipy.stats import kurtosis >>> x = np.linspace(-5, 5, 100) >>> ax = plt.subplot() >>> distnames = ['laplace', 'norm', 'uniform'] >>> for distname in distnames: ... if distname == 'uniform': ... dist = getattr(stats, distname)(loc=-2, scale=4) ... else: ... dist = getattr(stats, distname) ... data = dist.rvs(size=1000) ... kur = kurtosis(data, fisher=True) ... y = dist.pdf(x) ... ax.plot(x, y, label="{}, {}".format(distname, round(kur, 3))) ... ax.legend() The Laplace distribution has a heavier tail than the normal distribution. The uniform distribution (which has negative kurtosis) has the thinnest tail. """ a, axis = _chk_asarray(a, axis) contains_nan, nan_policy = _contains_nan(a, nan_policy) if contains_nan and nan_policy == 'omit': a = ma.masked_invalid(a) return mstats_basic.kurtosis(a, axis, fisher, bias) n = a.shape[axis] m2 = moment(a, 2, axis) m4 = moment(a, 4, axis) zero = (m2 == 0) olderr = np.seterr(all='ignore') try: vals = np.where(zero, 0, m4 / m2**2.0) finally: np.seterr(**olderr) if not bias: can_correct = (n > 3) & (m2 > 0) if can_correct.any(): m2 = np.extract(can_correct, m2) m4 = np.extract(can_correct, m4) nval = 1.0/(n-2)/(n-3) * ((n**2-1.0)*m4/m2**2.0 - 3*(n-1)**2.0) np.place(vals, can_correct, nval + 3.0) if vals.ndim == 0: vals = vals.item() # array scalar return vals - 3 if fisher else vals DescribeResult = namedtuple('DescribeResult', ('nobs', 'minmax', 'mean', 'variance', 'skewness', 'kurtosis')) def describe(a, axis=0, ddof=1, bias=True, nan_policy='propagate'): """ Compute several descriptive statistics of the passed array. Parameters ---------- a : array_like Input data. axis : int or None, optional Axis along which statistics are calculated. Default is 0. If None, compute over the whole array `a`. ddof : int, optional Delta degrees of freedom (only for variance). Default is 1. bias : bool, optional If False, then the skewness and kurtosis calculations are corrected for statistical bias. nan_policy : {'propagate', 'raise', 'omit'}, optional Defines how to handle when input contains nan. 'propagate' returns nan, 'raise' throws an error, 'omit' performs the calculations ignoring nan values. Default is 'propagate'. Returns ------- nobs : int or ndarray of ints Number of observations (length of data along `axis`). When 'omit' is chosen as nan_policy, each column is counted separately. minmax: tuple of ndarrays or floats Minimum and maximum value of data array. mean : ndarray or float Arithmetic mean of data along axis. variance : ndarray or float Unbiased variance of the data along axis, denominator is number of observations minus one. skewness : ndarray or float Skewness, based on moment calculations with denominator equal to the number of observations, i.e. no degrees of freedom correction. 
kurtosis : ndarray or float Kurtosis (Fisher). The kurtosis is normalized so that it is zero for the normal distribution. No degrees of freedom are used. See Also -------- skew, kurtosis Examples -------- >>> from scipy import stats >>> a = np.arange(10) >>> stats.describe(a) DescribeResult(nobs=10, minmax=(0, 9), mean=4.5, variance=9.166666666666666, skewness=0.0, kurtosis=-1.2242424242424244) >>> b = [[1, 2], [3, 4]] >>> stats.describe(b) DescribeResult(nobs=2, minmax=(array([1, 2]), array([3, 4])), mean=array([2., 3.]), variance=array([2., 2.]), skewness=array([0., 0.]), kurtosis=array([-2., -2.])) """ a, axis = _chk_asarray(a, axis) contains_nan, nan_policy = _contains_nan(a, nan_policy) if contains_nan and nan_policy == 'omit': a = ma.masked_invalid(a) return mstats_basic.describe(a, axis, ddof, bias) if a.size == 0: raise ValueError("The input must not be empty.") n = a.shape[axis] mm = (np.min(a, axis=axis), np.max(a, axis=axis)) m = np.mean(a, axis=axis) v = np.var(a, axis=axis, ddof=ddof) sk = skew(a, axis, bias=bias) kurt = kurtosis(a, axis, bias=bias) return DescribeResult(n, mm, m, v, sk, kurt) ##################################### # NORMALITY TESTS # ##################################### SkewtestResult = namedtuple('SkewtestResult', ('statistic', 'pvalue')) def skewtest(a, axis=0, nan_policy='propagate'): """ Test whether the skew is different from the normal distribution. This function tests the null hypothesis that the skewness of the population that the sample was drawn from is the same as that of a corresponding normal distribution. Parameters ---------- a : array The data to be tested axis : int or None, optional Axis along which statistics are calculated. Default is 0. If None, compute over the whole array `a`. nan_policy : {'propagate', 'raise', 'omit'}, optional Defines how to handle when input contains nan. 'propagate' returns nan, 'raise' throws an error, 'omit' performs the calculations ignoring nan values. Default is 'propagate'. Returns ------- statistic : float The computed z-score for this test. pvalue : float a 2-sided p-value for the hypothesis test Notes ----- The sample size must be at least 8. References ---------- .. [1] R. B. D'Agostino, A. J. Belanger and R. B. D'Agostino Jr., "A suggestion for using powerful and informative tests of normality", American Statistician 44, pp. 316-321, 1990. Examples -------- >>> from scipy.stats import skewtest >>> skewtest([1, 2, 3, 4, 5, 6, 7, 8]) SkewtestResult(statistic=1.0108048609177787, pvalue=0.3121098361421897) >>> skewtest([2, 8, 0, 4, 1, 9, 9, 0]) SkewtestResult(statistic=0.44626385374196975, pvalue=0.6554066631275459) >>> skewtest([1, 2, 3, 4, 5, 6, 7, 8000]) SkewtestResult(statistic=3.571773510360407, pvalue=0.0003545719905823133) >>> skewtest([100, 100, 100, 100, 100, 100, 100, 101]) SkewtestResult(statistic=3.5717766638478072, pvalue=0.000354567720281634) """ a, axis = _chk_asarray(a, axis) contains_nan, nan_policy = _contains_nan(a, nan_policy) if contains_nan and nan_policy == 'omit': a = ma.masked_invalid(a) return mstats_basic.skewtest(a, axis) if axis is None: a = np.ravel(a) axis = 0 b2 = skew(a, axis) n = a.shape[axis] if n < 8: raise ValueError( "skewtest is not valid with less than 8 samples; %i samples" " were given." 
% int(n)) y = b2 * math.sqrt(((n + 1) * (n + 3)) / (6.0 * (n - 2))) beta2 = (3.0 * (n**2 + 27*n - 70) * (n+1) * (n+3) / ((n-2.0) * (n+5) * (n+7) * (n+9))) W2 = -1 + math.sqrt(2 * (beta2 - 1)) delta = 1 / math.sqrt(0.5 * math.log(W2)) alpha = math.sqrt(2.0 / (W2 - 1)) y = np.where(y == 0, 1, y) Z = delta * np.log(y / alpha + np.sqrt((y / alpha)**2 + 1)) return SkewtestResult(Z, 2 * distributions.norm.sf(np.abs(Z))) KurtosistestResult = namedtuple('KurtosistestResult', ('statistic', 'pvalue')) def kurtosistest(a, axis=0, nan_policy='propagate'): """ Test whether a dataset has normal kurtosis. This function tests the null hypothesis that the kurtosis of the population from which the sample was drawn is that of the normal distribution: ``kurtosis = 3(n-1)/(n+1)``. Parameters ---------- a : array array of the sample data axis : int or None, optional Axis along which to compute test. Default is 0. If None, compute over the whole array `a`. nan_policy : {'propagate', 'raise', 'omit'}, optional Defines how to handle when input contains nan. 'propagate' returns nan, 'raise' throws an error, 'omit' performs the calculations ignoring nan values. Default is 'propagate'. Returns ------- statistic : float The computed z-score for this test. pvalue : float The 2-sided p-value for the hypothesis test Notes ----- Valid only for n>20. This function uses the method described in [1]_. References ---------- .. [1] see e.g. F. J. Anscombe, W. J. Glynn, "Distribution of the kurtosis statistic b2 for normal samples", Biometrika, vol. 70, pp. 227-234, 1983. Examples -------- >>> from scipy.stats import kurtosistest >>> kurtosistest(list(range(20))) KurtosistestResult(statistic=-1.7058104152122062, pvalue=0.08804338332528348) >>> np.random.seed(28041990) >>> s = np.random.normal(0, 1, 1000) >>> kurtosistest(s) KurtosistestResult(statistic=1.2317590987707365, pvalue=0.21803908613450895) """ a, axis = _chk_asarray(a, axis) contains_nan, nan_policy = _contains_nan(a, nan_policy) if contains_nan and nan_policy == 'omit': a = ma.masked_invalid(a) return mstats_basic.kurtosistest(a, axis) n = a.shape[axis] if n < 5: raise ValueError( "kurtosistest requires at least 5 observations; %i observations" " were given." % int(n)) if n < 20: warnings.warn("kurtosistest only valid for n>=20 ... continuing " "anyway, n=%i" % int(n)) b2 = kurtosis(a, axis, fisher=False) E = 3.0*(n-1) / (n+1) varb2 = 24.0*n*(n-2)*(n-3) / ((n+1)*(n+1.)*(n+3)*(n+5)) # [1]_ Eq. 1 x = (b2-E) / np.sqrt(varb2) # [1]_ Eq. 4 # [1]_ Eq. 2: sqrtbeta1 = 6.0*(n*n-5*n+2)/((n+7)*(n+9)) * np.sqrt((6.0*(n+3)*(n+5)) / (n*(n-2)*(n-3))) # [1]_ Eq. 3: A = 6.0 + 8.0/sqrtbeta1 * (2.0/sqrtbeta1 + np.sqrt(1+4.0/(sqrtbeta1**2))) term1 = 1 - 2/(9.0*A) denom = 1 + x*np.sqrt(2/(A-4.0)) term2 = np.sign(denom) * np.where(denom == 0.0, np.nan, np.power((1-2.0/A)/np.abs(denom), 1/3.0)) if np.any(denom == 0): msg = "Test statistic not defined in some cases due to division by " \ "zero. Return nan in that case..." warnings.warn(msg, RuntimeWarning) Z = (term1 - term2) / np.sqrt(2/(9.0*A)) # [1]_ Eq. 5 if Z.ndim == 0: Z = Z[()] # zprob uses upper tail, so Z needs to be positive return KurtosistestResult(Z, 2 * distributions.norm.sf(np.abs(Z))) NormaltestResult = namedtuple('NormaltestResult', ('statistic', 'pvalue')) def normaltest(a, axis=0, nan_policy='propagate'): """ Test whether a sample differs from a normal distribution. This function tests the null hypothesis that a sample comes from a normal distribution. 
It is based on D'Agostino and Pearson's [1]_, [2]_ test that combines skew and kurtosis to produce an omnibus test of normality. Parameters ---------- a : array_like The array containing the sample to be tested. axis : int or None, optional Axis along which to compute test. Default is 0. If None, compute over the whole array `a`. nan_policy : {'propagate', 'raise', 'omit'}, optional Defines how to handle when input contains nan. 'propagate' returns nan, 'raise' throws an error, 'omit' performs the calculations ignoring nan values. Default is 'propagate'. Returns ------- statistic : float or array ``s^2 + k^2``, where ``s`` is the z-score returned by `skewtest` and ``k`` is the z-score returned by `kurtosistest`. pvalue : float or array A 2-sided chi squared probability for the hypothesis test. References ---------- .. [1] D'Agostino, R. B. (1971), "An omnibus test of normality for moderate and large sample size", Biometrika, 58, 341-348 .. [2] D'Agostino, R. and Pearson, E. S. (1973), "Tests for departure from normality", Biometrika, 60, 613-622 Examples -------- >>> from scipy import stats >>> pts = 1000 >>> np.random.seed(28041990) >>> a = np.random.normal(0, 1, size=pts) >>> b = np.random.normal(2, 1, size=pts) >>> x = np.concatenate((a, b)) >>> k2, p = stats.normaltest(x) >>> alpha = 1e-3 >>> print("p = {:g}".format(p)) p = 3.27207e-11 >>> if p < alpha: # null hypothesis: x comes from a normal distribution ... print("The null hypothesis can be rejected") ... else: ... print("The null hypothesis cannot be rejected") The null hypothesis can be rejected """ a, axis = _chk_asarray(a, axis) contains_nan, nan_policy = _contains_nan(a, nan_policy) if contains_nan and nan_policy == 'omit': a = ma.masked_invalid(a) return mstats_basic.normaltest(a, axis) s, _ = skewtest(a, axis) k, _ = kurtosistest(a, axis) k2 = s*s + k*k return NormaltestResult(k2, distributions.chi2.sf(k2, 2)) def jarque_bera(x): """ Perform the Jarque-Bera goodness of fit test on sample data. The Jarque-Bera test tests whether the sample data has the skewness and kurtosis matching a normal distribution. Note that this test only works for a large enough number of data samples (>2000) as the test statistic asymptotically has a Chi-squared distribution with 2 degrees of freedom. Parameters ---------- x : array_like Observations of a random variable. Returns ------- jb_value : float The test statistic. p : float The p-value for the hypothesis test. References ---------- .. [1] Jarque, C. and Bera, A. (1980) "Efficient tests for normality, homoscedasticity and serial independence of regression residuals", 6 Econometric Letters 255-259. Examples -------- >>> from scipy import stats >>> np.random.seed(987654321) >>> x = np.random.normal(0, 1, 100000) >>> y = np.random.rayleigh(1, 100000) >>> stats.jarque_bera(x) (4.7165707989581342, 0.09458225503041906) >>> stats.jarque_bera(y) (6713.7098548143422, 0.0) """ x = np.asarray(x) n = x.size if n == 0: raise ValueError('At least one observation is required.') mu = x.mean() diffx = x - mu skewness = (1 / n * np.sum(diffx**3)) / (1 / n * np.sum(diffx**2))**(3 / 2.) kurtosis = (1 / n * np.sum(diffx**4)) / (1 / n * np.sum(diffx**2))**2 jb_value = n / 6 * (skewness**2 + (kurtosis - 3)**2 / 4) p = 1 - distributions.chi2.cdf(jb_value, 2) return jb_value, p ##################################### # FREQUENCY FUNCTIONS # ##################################### @np.deprecate(message="`itemfreq` is deprecated and will be removed in a " "future version. 
Use instead `np.unique(..., return_counts=True)`") def itemfreq(a): """ Return a 2-D array of item frequencies. Parameters ---------- a : (N,) array_like Input array. Returns ------- itemfreq : (K, 2) ndarray A 2-D frequency table. Column 1 contains sorted, unique values from `a`, column 2 contains their respective counts. Examples -------- >>> from scipy import stats >>> a = np.array([1, 1, 5, 0, 1, 2, 2, 0, 1, 4]) >>> stats.itemfreq(a) array([[ 0., 2.], [ 1., 4.], [ 2., 2.], [ 4., 1.], [ 5., 1.]]) >>> np.bincount(a) array([2, 4, 2, 0, 1, 1]) >>> stats.itemfreq(a/10.) array([[ 0. , 2. ], [ 0.1, 4. ], [ 0.2, 2. ], [ 0.4, 1. ], [ 0.5, 1. ]]) """ items, inv = np.unique(a, return_inverse=True) freq = np.bincount(inv) return np.array([items, freq]).T def scoreatpercentile(a, per, limit=(), interpolation_method='fraction', axis=None): """ Calculate the score at a given percentile of the input sequence. For example, the score at `per=50` is the median. If the desired quantile lies between two data points, we interpolate between them, according to the value of `interpolation`. If the parameter `limit` is provided, it should be a tuple (lower, upper) of two values. Parameters ---------- a : array_like A 1-D array of values from which to extract score. per : array_like Percentile(s) at which to extract score. Values should be in range [0,100]. limit : tuple, optional Tuple of two scalars, the lower and upper limits within which to compute the percentile. Values of `a` outside this (closed) interval will be ignored. interpolation_method : {'fraction', 'lower', 'higher'}, optional This optional parameter specifies the interpolation method to use, when the desired quantile lies between two data points `i` and `j` - fraction: ``i + (j - i) * fraction`` where ``fraction`` is the fractional part of the index surrounded by ``i`` and ``j``. - lower: ``i``. - higher: ``j``. axis : int, optional Axis along which the percentiles are computed. Default is None. If None, compute over the whole array `a`. Returns ------- score : float or ndarray Score at percentile(s). See Also -------- percentileofscore, numpy.percentile Notes ----- This function will become obsolete in the future. For NumPy 1.9 and higher, `numpy.percentile` provides all the functionality that `scoreatpercentile` provides. And it's significantly faster. Therefore it's recommended to use `numpy.percentile` for users that have numpy >= 1.9. Examples -------- >>> from scipy import stats >>> a = np.arange(100) >>> stats.scoreatpercentile(a, 50) 49.5 """ # adapted from NumPy's percentile function. When we require numpy >= 1.8, # the implementation of this function can be replaced by np.percentile. a = np.asarray(a) if a.size == 0: # empty array, return nan(s) with shape matching `per` if np.isscalar(per): return np.nan else: return np.full(np.asarray(per).shape, np.nan, dtype=np.float64) if limit: a = a[(limit[0] <= a) & (a <= limit[1])] sorted_ = np.sort(a, axis=axis) if axis is None: axis = 0 return _compute_qth_percentile(sorted_, per, interpolation_method, axis) # handle sequence of per's without calling sort multiple times def _compute_qth_percentile(sorted_, per, interpolation_method, axis): if not np.isscalar(per): score = [_compute_qth_percentile(sorted_, i, interpolation_method, axis) for i in per] return np.array(score) if not (0 <= per <= 100): raise ValueError("percentile must be in the range [0, 100]") indexer = [slice(None)] * sorted_.ndim idx = per / 100. 
* (sorted_.shape[axis] - 1) if int(idx) != idx: # round fractional indices according to interpolation method if interpolation_method == 'lower': idx = int(np.floor(idx)) elif interpolation_method == 'higher': idx = int(np.ceil(idx)) elif interpolation_method == 'fraction': pass # keep idx as fraction and interpolate else: raise ValueError("interpolation_method can only be 'fraction', " "'lower' or 'higher'") i = int(idx) if i == idx: indexer[axis] = slice(i, i + 1) weights = array(1) sumval = 1.0 else: indexer[axis] = slice(i, i + 2) j = i + 1 weights = array([(j - idx), (idx - i)], float) wshape = [1] * sorted_.ndim wshape[axis] = 2 weights.shape = wshape sumval = weights.sum() # Use np.add.reduce (== np.sum but a little faster) to coerce data type return np.add.reduce(sorted_[tuple(indexer)] * weights, axis=axis) / sumval def percentileofscore(a, score, kind='rank'): """ The percentile rank of a score relative to a list of scores. A `percentileofscore` of, for example, 80% means that 80% of the scores in `a` are below the given score. In the case of gaps or ties, the exact definition depends on the optional keyword, `kind`. Parameters ---------- a : array_like Array of scores to which `score` is compared. score : int or float Score that is compared to the elements in `a`. kind : {'rank', 'weak', 'strict', 'mean'}, optional This optional parameter specifies the interpretation of the resulting score: - "rank": Average percentage ranking of score. In case of multiple matches, average the percentage rankings of all matching scores. - "weak": This kind corresponds to the definition of a cumulative distribution function. A percentileofscore of 80% means that 80% of values are less than or equal to the provided score. - "strict": Similar to "weak", except that only values that are strictly less than the given score are counted. - "mean": The average of the "weak" and "strict" scores, often used in testing. See https://en.wikipedia.org/wiki/Percentile_rank Returns ------- pcos : float Percentile-position of score (0-100) relative to `a`. 
See Also -------- numpy.percentile Examples -------- Three-quarters of the given values lie below a given score: >>> from scipy import stats >>> stats.percentileofscore([1, 2, 3, 4], 3) 75.0 With multiple matches, note how the scores of the two matches, 0.6 and 0.8 respectively, are averaged: >>> stats.percentileofscore([1, 2, 3, 3, 4], 3) 70.0 Only 2/5 values are strictly less than 3: >>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='strict') 40.0 But 4/5 values are less than or equal to 3: >>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='weak') 80.0 The average between the weak and the strict scores is >>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='mean') 60.0 """ if np.isnan(score): return np.nan a = np.asarray(a) n = len(a) if n == 0: return 100.0 if kind == 'rank': left = np.count_nonzero(a < score) right = np.count_nonzero(a <= score) pct = (right + left + (1 if right > left else 0)) * 50.0/n return pct elif kind == 'strict': return np.count_nonzero(a < score) / n * 100 elif kind == 'weak': return np.count_nonzero(a <= score) / n * 100 elif kind == 'mean': pct = (np.count_nonzero(a < score) + np.count_nonzero(a <= score)) / n * 50 return pct else: raise ValueError("kind can only be 'rank', 'strict', 'weak' or 'mean'") HistogramResult = namedtuple('HistogramResult', ('count', 'lowerlimit', 'binsize', 'extrapoints')) def _histogram(a, numbins=10, defaultlimits=None, weights=None, printextras=False): """ Separate the range into several bins and return the number of instances in each bin. Parameters ---------- a : array_like Array of scores which will be put into bins. numbins : int, optional The number of bins to use for the histogram. Default is 10. defaultlimits : tuple (lower, upper), optional The lower and upper values for the range of the histogram. If no value is given, a range slightly larger than the range of the values in a is used. Specifically ``(a.min() - s, a.max() + s)``, where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``. weights : array_like, optional The weights for each value in `a`. Default is None, which gives each value a weight of 1.0 printextras : bool, optional If True, if there are extra points (i.e. the points that fall outside the bin limits) a warning is raised saying how many of those points there are. Default is False. Returns ------- count : ndarray Number of points (or sum of weights) in each bin. lowerlimit : float Lowest value of histogram, the lower limit of the first bin. binsize : float The size of the bins (all bins have the same size). extrapoints : int The number of points outside the range of the histogram. See Also -------- numpy.histogram Notes ----- This histogram is based on numpy's histogram but has a larger range by default if default limits is not set. """ a = np.ravel(a) if defaultlimits is None: if a.size == 0: # handle empty arrays. Undetermined range, so use 0-1. defaultlimits = (0, 1) else: # no range given, so use values in `a` data_min = a.min() data_max = a.max() # Have bins extend past min and max values slightly s = (data_max - data_min) / (2. 
* (numbins - 1.)) defaultlimits = (data_min - s, data_max + s) # use numpy's histogram method to compute bins hist, bin_edges = np.histogram(a, bins=numbins, range=defaultlimits, weights=weights) # hist are not always floats, convert to keep with old output hist = np.array(hist, dtype=float) # fixed width for bins is assumed, as numpy's histogram gives # fixed width bins for int values for 'bins' binsize = bin_edges[1] - bin_edges[0] # calculate number of extra points extrapoints = len([v for v in a if defaultlimits[0] > v or v > defaultlimits[1]]) if extrapoints > 0 and printextras: warnings.warn("Points outside given histogram range = %s" % extrapoints) return HistogramResult(hist, defaultlimits[0], binsize, extrapoints) CumfreqResult = namedtuple('CumfreqResult', ('cumcount', 'lowerlimit', 'binsize', 'extrapoints')) def cumfreq(a, numbins=10, defaultreallimits=None, weights=None): """ Return a cumulative frequency histogram, using the histogram function. A cumulative histogram is a mapping that counts the cumulative number of observations in all of the bins up to the specified bin. Parameters ---------- a : array_like Input array. numbins : int, optional The number of bins to use for the histogram. Default is 10. defaultreallimits : tuple (lower, upper), optional The lower and upper values for the range of the histogram. If no value is given, a range slightly larger than the range of the values in `a` is used. Specifically ``(a.min() - s, a.max() + s)``, where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``. weights : array_like, optional The weights for each value in `a`. Default is None, which gives each value a weight of 1.0 Returns ------- cumcount : ndarray Binned values of cumulative frequency. lowerlimit : float Lower real limit binsize : float Width of each bin. extrapoints : int Extra points. Examples -------- >>> import matplotlib.pyplot as plt >>> from scipy import stats >>> x = [1, 4, 2, 1, 3, 1] >>> res = stats.cumfreq(x, numbins=4, defaultreallimits=(1.5, 5)) >>> res.cumcount array([ 1., 2., 3., 3.]) >>> res.extrapoints 3 Create a normal distribution with 1000 random values >>> rng = np.random.RandomState(seed=12345) >>> samples = stats.norm.rvs(size=1000, random_state=rng) Calculate cumulative frequencies >>> res = stats.cumfreq(samples, numbins=25) Calculate space of values for x >>> x = res.lowerlimit + np.linspace(0, res.binsize*res.cumcount.size, ... res.cumcount.size) Plot histogram and cumulative histogram >>> fig = plt.figure(figsize=(10, 4)) >>> ax1 = fig.add_subplot(1, 2, 1) >>> ax2 = fig.add_subplot(1, 2, 2) >>> ax1.hist(samples, bins=25) >>> ax1.set_title('Histogram') >>> ax2.bar(x, res.cumcount, width=res.binsize) >>> ax2.set_title('Cumulative histogram') >>> ax2.set_xlim([x.min(), x.max()]) >>> plt.show() """ h, l, b, e = _histogram(a, numbins, defaultreallimits, weights=weights) cumhist = np.cumsum(h * 1, axis=0) return CumfreqResult(cumhist, l, b, e) RelfreqResult = namedtuple('RelfreqResult', ('frequency', 'lowerlimit', 'binsize', 'extrapoints')) def relfreq(a, numbins=10, defaultreallimits=None, weights=None): """ Return a relative frequency histogram, using the histogram function. A relative frequency histogram is a mapping of the number of observations in each of the bins relative to the total of observations. Parameters ---------- a : array_like Input array. numbins : int, optional The number of bins to use for the histogram. Default is 10. defaultreallimits : tuple (lower, upper), optional The lower and upper values for the range of the histogram. 
If no value is given, a range slightly larger than the range of the values in a is used. Specifically ``(a.min() - s, a.max() + s)``, where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``. weights : array_like, optional The weights for each value in `a`. Default is None, which gives each value a weight of 1.0 Returns ------- frequency : ndarray Binned values of relative frequency. lowerlimit : float Lower real limit binsize : float Width of each bin. extrapoints : int Extra points. Examples -------- >>> import matplotlib.pyplot as plt >>> from scipy import stats >>> a = np.array([2, 4, 1, 2, 3, 2]) >>> res = stats.relfreq(a, numbins=4) >>> res.frequency array([ 0.16666667, 0.5 , 0.16666667, 0.16666667]) >>> np.sum(res.frequency) # relative frequencies should add up to 1 1.0 Create a normal distribution with 1000 random values >>> rng = np.random.RandomState(seed=12345) >>> samples = stats.norm.rvs(size=1000, random_state=rng) Calculate relative frequencies >>> res = stats.relfreq(samples, numbins=25) Calculate space of values for x >>> x = res.lowerlimit + np.linspace(0, res.binsize*res.frequency.size, ... res.frequency.size) Plot relative frequency histogram >>> fig = plt.figure(figsize=(5, 4)) >>> ax = fig.add_subplot(1, 1, 1) >>> ax.bar(x, res.frequency, width=res.binsize) >>> ax.set_title('Relative frequency histogram') >>> ax.set_xlim([x.min(), x.max()]) >>> plt.show() """ a = np.asanyarray(a) h, l, b, e = _histogram(a, numbins, defaultreallimits, weights=weights) h = h / a.shape[0] return RelfreqResult(h, l, b, e) ##################################### # VARIABILITY FUNCTIONS # ##################################### def obrientransform(*args): """ Compute the O'Brien transform on input data (any number of arrays). Used to test for homogeneity of variance prior to running one-way stats. Each array in ``*args`` is one level of a factor. If `f_oneway` is run on the transformed data and found significant, the variances are unequal. From Maxwell and Delaney [1]_, p.112. Parameters ---------- args : tuple of array_like Any number of arrays. Returns ------- obrientransform : ndarray Transformed data for use in an ANOVA. The first dimension of the result corresponds to the sequence of transformed arrays. If the arrays given are all 1-D of the same length, the return value is a 2-D array; otherwise it is a 1-D array of type object, with each element being an ndarray. References ---------- .. [1] S. E. Maxwell and H. D. Delaney, "Designing Experiments and Analyzing Data: A Model Comparison Perspective", Wadsworth, 1990. Examples -------- We'll test the following data sets for differences in their variance. >>> x = [10, 11, 13, 9, 7, 12, 12, 9, 10] >>> y = [13, 21, 5, 10, 8, 14, 10, 12, 7, 15] Apply the O'Brien transform to the data. >>> from scipy.stats import obrientransform >>> tx, ty = obrientransform(x, y) Use `scipy.stats.f_oneway` to apply a one-way ANOVA test to the transformed data. >>> from scipy.stats import f_oneway >>> F, p = f_oneway(tx, ty) >>> p 0.1314139477040335 If we require that ``p < 0.05`` for significance, we cannot conclude that the variances are different. """ TINY = np.sqrt(np.finfo(float).eps) # `arrays` will hold the transformed arguments. arrays = [] for arg in args: a = np.asarray(arg) n = len(a) mu = np.mean(a) sq = (a - mu)**2 sumsq = sq.sum() # The O'Brien transform. t = ((n - 1.5) * n * sq - 0.5 * sumsq) / ((n - 1) * (n - 2)) # Check that the mean of the transformed data is equal to the # original variance. 
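# For the O'Brien transform, the mean of the transformed scores equals the # unbiased sample variance of the original data, so a discrepancy larger than # floating-point noise (TINY) indicates a numerical problem rather than a # statistical one.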
var = sumsq / (n - 1) if abs(var - np.mean(t)) > TINY: raise ValueError('Lack of convergence in obrientransform.') arrays.append(t) return np.array(arrays) def sem(a, axis=0, ddof=1, nan_policy='propagate'): """ Calculate the standard error of the mean (or standard error of measurement) of the values in the input array. Parameters ---------- a : array_like An array containing the values for which the standard error is returned. axis : int or None, optional Axis along which to operate. Default is 0. If None, compute over the whole array `a`. ddof : int, optional Delta degrees-of-freedom. How many degrees of freedom to adjust for bias in limited samples relative to the population estimate of variance. Defaults to 1. nan_policy : {'propagate', 'raise', 'omit'}, optional Defines how to handle when input contains nan. 'propagate' returns nan, 'raise' throws an error, 'omit' performs the calculations ignoring nan values. Default is 'propagate'. Returns ------- s : ndarray or float The standard error of the mean in the sample(s), along the input axis. Notes ----- The default value for `ddof` is different to the default (0) used by other ddof containing routines, such as np.std and np.nanstd. Examples -------- Find standard error along the first axis: >>> from scipy import stats >>> a = np.arange(20).reshape(5,4) >>> stats.sem(a) array([ 2.8284, 2.8284, 2.8284, 2.8284]) Find standard error across the whole array, using n degrees of freedom: >>> stats.sem(a, axis=None, ddof=0) 1.2893796958227628 """ a, axis = _chk_asarray(a, axis) contains_nan, nan_policy = _contains_nan(a, nan_policy) if contains_nan and nan_policy == 'omit': a = ma.masked_invalid(a) return mstats_basic.sem(a, axis, ddof) n = a.shape[axis] s = np.std(a, axis=axis, ddof=ddof) / np.sqrt(n) return s def zscore(a, axis=0, ddof=0, nan_policy='propagate'): """ Calculate the z score of each value in the sample, relative to the sample mean and standard deviation. Parameters ---------- a : array_like An array like object containing the sample data. axis : int or None, optional Axis along which to operate. Default is 0. If None, compute over the whole array `a`. ddof : int, optional Degrees of freedom correction in the calculation of the standard deviation. Default is 0. nan_policy : {'propagate', 'raise', 'omit'}, optional Defines how to handle when input contains nan. 'propagate' returns nan, 'raise' throws an error, 'omit' performs the calculations ignoring nan values. Default is 'propagate'. Returns ------- zscore : array_like The z-scores, standardized by mean and standard deviation of input array `a`. Notes ----- This function preserves ndarray subclasses, and works also with matrices and masked arrays (it uses `asanyarray` instead of `asarray` for parameters). Examples -------- >>> a = np.array([ 0.7972, 0.0767, 0.4383, 0.7866, 0.8091, ... 0.1954, 0.6307, 0.6599, 0.1065, 0.0508]) >>> from scipy import stats >>> stats.zscore(a) array([ 1.1273, -1.247 , -0.0552, 1.0923, 1.1664, -0.8559, 0.5786, 0.6748, -1.1488, -1.3324]) Computing along a specified axis, using n-1 degrees of freedom (``ddof=1``) to calculate the standard deviation: >>> b = np.array([[ 0.3148, 0.0478, 0.6243, 0.4608], ... [ 0.7149, 0.0775, 0.6072, 0.9656], ... [ 0.6341, 0.1403, 0.9759, 0.4064], ... [ 0.5918, 0.6948, 0.904 , 0.3721], ... 
[ 0.0921, 0.2481, 0.1188, 0.1366]]) >>> stats.zscore(b, axis=1, ddof=1) array([[-0.19264823, -1.28415119, 1.07259584, 0.40420358], [ 0.33048416, -1.37380874, 0.04251374, 1.00081084], [ 0.26796377, -1.12598418, 1.23283094, -0.37481053], [-0.22095197, 0.24468594, 1.19042819, -1.21416216], [-0.82780366, 1.4457416 , -0.43867764, -0.1792603 ]]) """ a = np.asanyarray(a) contains_nan, nan_policy = _contains_nan(a, nan_policy) if contains_nan and nan_policy == 'omit': mns = np.nanmean(a=a, axis=axis, keepdims=True) sstd = np.nanstd(a=a, axis=axis, ddof=ddof, keepdims=True) else: mns = a.mean(axis=axis, keepdims=True) sstd = a.std(axis=axis, ddof=ddof, keepdims=True) return (a - mns) / sstd def zmap(scores, compare, axis=0, ddof=0): """ Calculate the relative z-scores. Return an array of z-scores, i.e., scores that are standardized to zero mean and unit variance, where mean and variance are calculated from the comparison array. Parameters ---------- scores : array_like The input for which z-scores are calculated. compare : array_like The input from which the mean and standard deviation of the normalization are taken; assumed to have the same dimension as `scores`. axis : int or None, optional Axis over which mean and variance of `compare` are calculated. Default is 0. If None, compute over the whole array `scores`. ddof : int, optional Degrees of freedom correction in the calculation of the standard deviation. Default is 0. Returns ------- zscore : array_like Z-scores, in the same shape as `scores`. Notes ----- This function preserves ndarray subclasses, and works also with matrices and masked arrays (it uses `asanyarray` instead of `asarray` for parameters). Examples -------- >>> from scipy.stats import zmap >>> a = [0.5, 2.0, 2.5, 3] >>> b = [0, 1, 2, 3, 4] >>> zmap(a, b) array([-1.06066017, 0. , 0.35355339, 0.70710678]) """ scores, compare = map(np.asanyarray, [scores, compare]) mns = compare.mean(axis=axis, keepdims=True) sstd = compare.std(axis=axis, ddof=ddof, keepdims=True) return (scores - mns) / sstd def gstd(a, axis=0, ddof=1): """Calculate the geometric standard deviation of an array The geometric standard deviation describes the spread of a set of numbers where the geometric mean is preferred. It is a multiplicative factor, and so a dimensionless quantity. It is defined as the exponent of the standard deviation of ``log(a)``. Mathematically the population geometric standard deviation can be evaluated as:: gstd = exp(std(log(a))) .. versionadded:: 1.3.0 Parameters ---------- a : array_like An array like object containing the sample data. axis : int, tuple or None, optional Axis along which to operate. Default is 0. If None, compute over the whole array `a`. ddof : int, optional Degree of freedom correction in the calculation of the geometric standard deviation. Default is 1. Returns ------- ndarray or float An array of the geometric standard deviation. If `axis` is None or `a` is a 1d array a float is returned. Notes ----- As the calculation requires the use of logarithms the geometric standard deviation only supports strictly positive values. Any non-positive or infinite values will raise a `ValueError`. The geometric standard deviation is sometimes confused with the exponent of the standard deviation, ``exp(std(a))``. Instead the geometric standard deviation is ``exp(std(log(a)))``. The default value for `ddof` is different to the default value (0) used by other ddof containing functions, such as ``np.std`` and ``np.nanstd``. 
Examples -------- Find the geometric standard deviation of a log-normally distributed sample. Note that the standard deviation of the distribution is one, on a log scale this evaluates to approximately ``exp(1)``. >>> from scipy.stats import gstd >>> np.random.seed(123) >>> sample = np.random.lognormal(mean=0, sigma=1, size=1000) >>> gstd(sample) 2.7217860664589946 Compute the geometric standard deviation of a multidimensional array and of a given axis. >>> a = np.arange(1, 25).reshape(2, 3, 4) >>> gstd(a, axis=None) 2.2944076136018947 >>> gstd(a, axis=2) array([[1.82424757, 1.22436866, 1.13183117], [1.09348306, 1.07244798, 1.05914985]]) >>> gstd(a, axis=(1,2)) array([2.12939215, 1.22120169]) The geometric standard deviation further handles masked arrays. >>> a = np.arange(1, 25).reshape(2, 3, 4) >>> ma = np.ma.masked_where(a > 16, a) >>> ma masked_array( data=[[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]], [[13, 14, 15, 16], [--, --, --, --], [--, --, --, --]]], mask=[[[False, False, False, False], [False, False, False, False], [False, False, False, False]], [[False, False, False, False], [ True, True, True, True], [ True, True, True, True]]], fill_value=999999) >>> gstd(ma, axis=2) masked_array( data=[[1.8242475707663655, 1.2243686572447428, 1.1318311657788478], [1.0934830582350938, --, --]], mask=[[False, False, False], [False, True, True]], fill_value=999999) """ a = np.asanyarray(a) log = ma.log if isinstance(a, ma.MaskedArray) else np.log try: with warnings.catch_warnings(): warnings.simplefilter("error", RuntimeWarning) return np.exp(np.std(log(a), axis=axis, ddof=ddof)) except RuntimeWarning as w: if np.isinf(a).any(): raise ValueError( 'Infinite value encountered. The geometric standard deviation ' 'is defined for strictly positive values only.') a_nan = np.isnan(a) a_nan_any = a_nan.any() # exclude NaN's from negativity check, but # avoid expensive masking for arrays with no NaN if ((a_nan_any and np.less_equal(np.nanmin(a), 0)) or (not a_nan_any and np.less_equal(a, 0).any())): raise ValueError( 'Non positive value encountered. The geometric standard ' 'deviation is defined for strictly positive values only.') elif 'Degrees of freedom <= 0 for slice' == str(w): raise ValueError(w) else: # Remaining warnings don't need to be exceptions. return np.exp(np.std(log(a, where=~a_nan), axis=axis, ddof=ddof)) except TypeError: raise ValueError( 'Invalid array input. The inputs could not be ' 'safely coerced to any supported types') # Private dictionary initialized only once at module level # See https://en.wikipedia.org/wiki/Robust_measures_of_scale _scale_conversions = {'raw': 1.0, 'normal': special.erfinv(0.5) * 2.0 * math.sqrt(2.0)} def iqr(x, axis=None, rng=(25, 75), scale='raw', nan_policy='propagate', interpolation='linear', keepdims=False): """ Compute the interquartile range of the data along the specified axis. The interquartile range (IQR) is the difference between the 75th and 25th percentile of the data. It is a measure of the dispersion similar to standard deviation or variance, but is much more robust against outliers [2]_. The ``rng`` parameter allows this function to compute other percentile ranges than the actual IQR. For example, setting ``rng=(0, 100)`` is equivalent to `numpy.ptp`. The IQR of an empty array is `np.nan`. .. versionadded:: 0.18.0 Parameters ---------- x : array_like Input array or object that can be converted to an array. axis : int or sequence of int, optional Axis along which the range is computed. 
The default is to compute the IQR for the entire array. rng : Two-element sequence containing floats in range of [0,100] optional Percentiles over which to compute the range. Each must be between 0 and 100, inclusive. The default is the true IQR: `(25, 75)`. The order of the elements is not important. scale : scalar or str, optional The numerical value of scale will be divided out of the final result. The following string values are recognized: 'raw' : No scaling, just return the raw IQR. 'normal' : Scale by :math:`2 \\sqrt{2} erf^{-1}(\\frac{1}{2}) \\approx 1.349`. The default is 'raw'. Array-like scale is also allowed, as long as it broadcasts correctly to the output such that ``out / scale`` is a valid operation. The output dimensions depend on the input array, `x`, the `axis` argument, and the `keepdims` flag. nan_policy : {'propagate', 'raise', 'omit'}, optional Defines how to handle when input contains nan. 'propagate' returns nan, 'raise' throws an error, 'omit' performs the calculations ignoring nan values. Default is 'propagate'. interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}, optional Specifies the interpolation method to use when the percentile boundaries lie between two data points `i` and `j`: * 'linear' : `i + (j - i) * fraction`, where `fraction` is the fractional part of the index surrounded by `i` and `j`. * 'lower' : `i`. * 'higher' : `j`. * 'nearest' : `i` or `j` whichever is nearest. * 'midpoint' : `(i + j) / 2`. Default is 'linear'. keepdims : bool, optional If this is set to `True`, the reduced axes are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the original array `x`. Returns ------- iqr : scalar or ndarray If ``axis=None``, a scalar is returned. If the input contains integers or floats of smaller precision than ``np.float64``, then the output data-type is ``np.float64``. Otherwise, the output data-type is the same as that of the input. See Also -------- numpy.std, numpy.var Examples -------- >>> from scipy.stats import iqr >>> x = np.array([[10, 7, 4], [3, 2, 1]]) >>> x array([[10, 7, 4], [ 3, 2, 1]]) >>> iqr(x) 4.0 >>> iqr(x, axis=0) array([ 3.5, 2.5, 1.5]) >>> iqr(x, axis=1) array([ 3., 1.]) >>> iqr(x, axis=1, keepdims=True) array([[ 3.], [ 1.]]) Notes ----- This function is heavily dependent on the version of `numpy` that is installed. Versions greater than 1.11.0b3 are highly recommended, as they include a number of enhancements and fixes to `numpy.percentile` and `numpy.nanpercentile` that affect the operation of this function. The following modifications apply: Below 1.10.0 : `nan_policy` is poorly defined. The default behavior of `numpy.percentile` is used for 'propagate'. This is a hybrid of 'omit' and 'propagate' that mostly yields a skewed version of 'omit' since NaNs are sorted to the end of the data. A warning is raised if there are NaNs in the data. Below 1.9.0: `numpy.nanpercentile` does not exist. This means that `numpy.percentile` is used regardless of `nan_policy` and a warning is issued. See previous item for a description of the behavior. Below 1.9.0: `keepdims` and `interpolation` are not supported. The keywords get ignored with a warning if supplied with non-default values. However, multiple axes are still supported. References ---------- .. [1] "Interquartile range" https://en.wikipedia.org/wiki/Interquartile_range .. [2] "Robust measures of scale" https://en.wikipedia.org/wiki/Robust_measures_of_scale .. 
[3] "Quantile" https://en.wikipedia.org/wiki/Quantile """ x = asarray(x) # This check prevents percentile from raising an error later. Also, it is # consistent with `np.var` and `np.std`. if not x.size: return np.nan # An error may be raised here, so fail-fast, before doing lengthy # computations, even though `scale` is not used until later if isinstance(scale, string_types): scale_key = scale.lower() if scale_key not in _scale_conversions: raise ValueError("{0} not a valid scale for `iqr`".format(scale)) scale = _scale_conversions[scale_key] # Select the percentile function to use based on nans and policy contains_nan, nan_policy = _contains_nan(x, nan_policy) if contains_nan and nan_policy == 'omit': percentile_func = _iqr_nanpercentile else: percentile_func = _iqr_percentile if len(rng) != 2: raise TypeError("quantile range must be two element sequence") if np.isnan(rng).any(): raise ValueError("range must not contain NaNs") rng = sorted(rng) pct = percentile_func(x, rng, axis=axis, interpolation=interpolation, keepdims=keepdims, contains_nan=contains_nan) out = np.subtract(pct[1], pct[0]) if scale != 1.0: out /= scale return out def median_absolute_deviation(x, axis=0, center=np.median, scale=1.4826, nan_policy='propagate'): """ Compute the median absolute deviation of the data along the given axis. The median absolute deviation (MAD, [1]_) computes the median over the absolute deviations from the median. It is a measure of dispersion similar to the standard deviation, but is more robust to outliers [2]_. The MAD of an empty array is ``np.nan``. .. versionadded:: 1.3.0 Parameters ---------- x : array_like Input array or object that can be converted to an array. axis : int or None, optional Axis along which the range is computed. Default is 0. If None, compute the MAD over the entire array. center : callable, optional A function that will return the central value. The default is to use np.median. Any user defined function used will need to have the function signature ``func(arr, axis)``. scale : int, optional The scaling factor applied to the MAD. The default scale (1.4826) ensures consistency with the standard deviation for normally distributed data. nan_policy : {'propagate', 'raise', 'omit'}, optional Defines how to handle when input contains nan. 'propagate' returns nan, 'raise' throws an error, 'omit' performs the calculations ignoring nan values. Default is 'propagate'. Returns ------- mad : scalar or ndarray If ``axis=None``, a scalar is returned. If the input contains integers or floats of smaller precision than ``np.float64``, then the output data-type is ``np.float64``. Otherwise, the output data-type is the same as that of the input. See Also -------- numpy.std, numpy.var, numpy.median, scipy.stats.iqr, scipy.stats.tmean, scipy.stats.tstd, scipy.stats.tvar Notes ----- The `center` argument only affects the calculation of the central value around which the MAD is calculated. That is, passing in ``center=np.mean`` will calculate the MAD around the mean - it will not calculate the *mean* absolute deviation. References ---------- .. [1] "Median absolute deviation" https://en.wikipedia.org/wiki/Median_absolute_deviation .. 
[2] "Robust measures of scale" https://en.wikipedia.org/wiki/Robust_measures_of_scale Examples -------- When comparing the behavior of `median_absolute_deviation` with ``np.std``, the latter is affected when we change a single value of an array to have an outlier value while the MAD hardly changes: >>> from scipy import stats >>> x = stats.norm.rvs(size=100, scale=1, random_state=123456) >>> x.std() 0.9973906394005013 >>> stats.median_absolute_deviation(x) 1.2280762773108278 >>> x[0] = 345.6 >>> x.std() 34.42304872314415 >>> stats.median_absolute_deviation(x) 1.2340335571164334 Axis handling example: >>> x = np.array([[10, 7, 4], [3, 2, 1]]) >>> x array([[10, 7, 4], [ 3, 2, 1]]) >>> stats.median_absolute_deviation(x) array([5.1891, 3.7065, 2.2239]) >>> stats.median_absolute_deviation(x, axis=None) 2.9652 """ x = asarray(x) # Consistent with `np.var` and `np.std`. if not x.size: return np.nan contains_nan, nan_policy = _contains_nan(x, nan_policy) if contains_nan and nan_policy == 'propagate': return np.nan if contains_nan and nan_policy == 'omit': # Way faster than carrying the masks around arr = ma.masked_invalid(x).compressed() else: arr = x if axis is None: med = center(arr) mad = np.median(np.abs(arr - med)) else: med = np.apply_over_axes(center, arr, axis) mad = np.median(np.abs(arr - med), axis=axis) return scale * mad def _iqr_percentile(x, q, axis=None, interpolation='linear', keepdims=False, contains_nan=False): """ Private wrapper that works around older versions of `numpy`. While this function is pretty much necessary for the moment, it should be removed as soon as the minimum supported numpy version allows. """ if contains_nan and NumpyVersion(np.__version__) < '1.10.0a': # I see no way to avoid the version check to ensure that the corrected # NaN behavior has been implemented except to call `percentile` on a # small array. msg = "Keyword nan_policy='propagate' not correctly supported for " \ "numpy versions < 1.10.x. The default behavior of " \ "`numpy.percentile` will be used." warnings.warn(msg, RuntimeWarning) try: # For older versions of numpy, there are two things that can cause a # problem here: missing keywords and non-scalar axis. The former can be # partially handled with a warning, the latter can be handled fully by # hacking in an implementation similar to numpy's function for # providing multi-axis functionality # (`numpy.lib.function_base._ureduce` for the curious). 
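# First try the modern keyword signature; if this numpy is too old to accept it, fall back to the workarounds below.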
result = np.percentile(x, q, axis=axis, keepdims=keepdims, interpolation=interpolation) except TypeError: if interpolation != 'linear' or keepdims: # At time of writing, this means np.__version__ < 1.9.0 warnings.warn("Keywords interpolation and keepdims not supported " "for your version of numpy", RuntimeWarning) try: # Special processing if axis is an iterable original_size = len(axis) except TypeError: # Axis is a scalar at this point pass else: axis = np.unique(np.asarray(axis) % x.ndim) if original_size > axis.size: # mimic numpy if axes are duplicated raise ValueError("duplicate value in axis") if axis.size == x.ndim: # axis includes all axes: revert to None axis = None elif axis.size == 1: # no rolling necessary axis = axis[0] else: # roll multiple axes to the end and flatten that part out for ax in axis[::-1]: x = np.rollaxis(x, ax, x.ndim) x = x.reshape(x.shape[:-axis.size] + (np.prod(x.shape[-axis.size:]),)) axis = -1 result = np.percentile(x, q, axis=axis) return result def _iqr_nanpercentile(x, q, axis=None, interpolation='linear', keepdims=False, contains_nan=False): """ Private wrapper that works around the following: 1. A bug in `np.nanpercentile` that was around until numpy version 1.11.0. 2. A bug in `np.percentile` NaN handling that was fixed in numpy version 1.10.0. 3. The non-existence of `np.nanpercentile` before numpy version 1.9.0. While this function is pretty much necessary for the moment, it should be removed as soon as the minimum supported numpy version allows. """ if hasattr(np, 'nanpercentile'): # At time of writing, this means np.__version__ >= 1.9.0 result = np.nanpercentile(x, q, axis=axis, interpolation=interpolation, keepdims=keepdims) # If non-scalar result and nanpercentile does not do proper axis roll. # I see no way of avoiding the version test since dimensions may just # happen to match in the data. if result.ndim > 1 and NumpyVersion(np.__version__) < '1.11.0a': axis = np.asarray(axis) if axis.size == 1: # If only one axis specified, reduction happens along that dimension if axis.ndim == 0: axis = axis[None] result = np.rollaxis(result, axis[0]) else: # If multiple axes, reduced dimension is last result = np.rollaxis(result, -1) else: msg = "Keyword nan_policy='omit' not correctly supported for numpy " \ "versions < 1.9.x. The default behavior of numpy.percentile " \ "will be used." warnings.warn(msg, RuntimeWarning) result = _iqr_percentile(x, q, axis=axis) return result ##################################### # TRIMMING FUNCTIONS # ##################################### SigmaclipResult = namedtuple('SigmaclipResult', ('clipped', 'lower', 'upper')) def sigmaclip(a, low=4., high=4.): """ Iterative sigma-clipping of array elements. Starting from the full sample, all elements outside the critical range are removed, i.e. all elements of the input array `c` that satisfy either of the following conditions :: c < mean(c) - std(c)*low c > mean(c) + std(c)*high The iteration continues with the updated sample until no elements are outside the (updated) range. Parameters ---------- a : array_like Data array, will be raveled if not 1-D. low : float, optional Lower bound factor of sigma clipping. Default is 4. high : float, optional Upper bound factor of sigma clipping. Default is 4. Returns ------- clipped : ndarray Input array with clipped elements removed. lower : float Lower threshold value used for clipping. upper : float Upper threshold value used for clipping.
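Notes ----- The clipping thresholds are recomputed from the surviving elements on every pass, so the returned ``lower`` and ``upper`` values are those of the final pass, in which nothing was removed; for instance, with very wide bounds such as ``low = high = 10`` a sample like ``np.arange(10.)`` survives the first pass unchanged and is returned as is.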
Examples -------- >>> from scipy.stats import sigmaclip >>> a = np.concatenate((np.linspace(9.5, 10.5, 31), ... np.linspace(0, 20, 5))) >>> fact = 1.5 >>> c, low, upp = sigmaclip(a, fact, fact) >>> c array([ 9.96666667, 10. , 10.03333333, 10. ]) >>> c.var(), c.std() (0.00055555555555555165, 0.023570226039551501) >>> low, c.mean() - fact*c.std(), c.min() (9.9646446609406727, 9.9646446609406727, 9.9666666666666668) >>> upp, c.mean() + fact*c.std(), c.max() (10.035355339059327, 10.035355339059327, 10.033333333333333) >>> a = np.concatenate((np.linspace(9.5, 10.5, 11), ... np.linspace(-100, -50, 3))) >>> c, low, upp = sigmaclip(a, 1.8, 1.8) >>> (c == np.linspace(9.5, 10.5, 11)).all() True """ c = np.asarray(a).ravel() delta = 1 while delta: c_std = c.std() c_mean = c.mean() size = c.size critlower = c_mean - c_std * low critupper = c_mean + c_std * high c = c[(c >= critlower) & (c <= critupper)] delta = size - c.size return SigmaclipResult(c, critlower, critupper) def trimboth(a, proportiontocut, axis=0): """ Slices off a proportion of items from both ends of an array. Slices off the passed proportion of items from both ends of the passed array (i.e., with `proportiontocut` = 0.1, slices leftmost 10% **and** rightmost 10% of scores). The trimmed values are the lowest and highest ones. Slices off less if proportion results in a non-integer slice index (i.e., conservatively slices off`proportiontocut`). Parameters ---------- a : array_like Data to trim. proportiontocut : float Proportion (in range 0-1) of total data set to trim of each end. axis : int or None, optional Axis along which to trim data. Default is 0. If None, compute over the whole array `a`. Returns ------- out : ndarray Trimmed version of array `a`. The order of the trimmed content is undefined. See Also -------- trim_mean Examples -------- >>> from scipy import stats >>> a = np.arange(20) >>> b = stats.trimboth(a, 0.1) >>> b.shape (16,) """ a = np.asarray(a) if a.size == 0: return a if axis is None: a = a.ravel() axis = 0 nobs = a.shape[axis] lowercut = int(proportiontocut * nobs) uppercut = nobs - lowercut if (lowercut >= uppercut): raise ValueError("Proportion too big.") atmp = np.partition(a, (lowercut, uppercut - 1), axis) sl = [slice(None)] * atmp.ndim sl[axis] = slice(lowercut, uppercut) return atmp[tuple(sl)] def trim1(a, proportiontocut, tail='right', axis=0): """ Slices off a proportion from ONE end of the passed array distribution. If `proportiontocut` = 0.1, slices off 'leftmost' or 'rightmost' 10% of scores. The lowest or highest values are trimmed (depending on the tail). Slices off less if proportion results in a non-integer slice index (i.e., conservatively slices off `proportiontocut` ). Parameters ---------- a : array_like Input array proportiontocut : float Fraction to cut off of 'left' or 'right' of distribution tail : {'left', 'right'}, optional Defaults to 'right'. axis : int or None, optional Axis along which to trim data. Default is 0. If None, compute over the whole array `a`. Returns ------- trim1 : ndarray Trimmed version of array `a`. The order of the trimmed content is undefined. 
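Examples -------- A small illustrative example; because the order of the trimmed output is undefined, it is sorted for display: >>> from scipy import stats >>> a = np.arange(10) >>> b = stats.trim1(a, 0.2, tail='right') >>> b.shape (8,) >>> np.sort(b) array([0, 1, 2, 3, 4, 5, 6, 7])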
""" a = np.asarray(a) if axis is None: a = a.ravel() axis = 0 nobs = a.shape[axis] # avoid possible corner case if proportiontocut >= 1: return [] if tail.lower() == 'right': lowercut = 0 uppercut = nobs - int(proportiontocut * nobs) elif tail.lower() == 'left': lowercut = int(proportiontocut * nobs) uppercut = nobs atmp = np.partition(a, (lowercut, uppercut - 1), axis) return atmp[lowercut:uppercut] def trim_mean(a, proportiontocut, axis=0): """ Return mean of array after trimming distribution from both tails. If `proportiontocut` = 0.1, slices off 'leftmost' and 'rightmost' 10% of scores. The input is sorted before slicing. Slices off less if proportion results in a non-integer slice index (i.e., conservatively slices off `proportiontocut` ). Parameters ---------- a : array_like Input array proportiontocut : float Fraction to cut off of both tails of the distribution axis : int or None, optional Axis along which the trimmed means are computed. Default is 0. If None, compute over the whole array `a`. Returns ------- trim_mean : ndarray Mean of trimmed array. See Also -------- trimboth tmean : compute the trimmed mean ignoring values outside given `limits`. Examples -------- >>> from scipy import stats >>> x = np.arange(20) >>> stats.trim_mean(x, 0.1) 9.5 >>> x2 = x.reshape(5, 4) >>> x2 array([[ 0, 1, 2, 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11], [12, 13, 14, 15], [16, 17, 18, 19]]) >>> stats.trim_mean(x2, 0.25) array([ 8., 9., 10., 11.]) >>> stats.trim_mean(x2, 0.25, axis=1) array([ 1.5, 5.5, 9.5, 13.5, 17.5]) """ a = np.asarray(a) if a.size == 0: return np.nan if axis is None: a = a.ravel() axis = 0 nobs = a.shape[axis] lowercut = int(proportiontocut * nobs) uppercut = nobs - lowercut if (lowercut > uppercut): raise ValueError("Proportion too big.") atmp = np.partition(a, (lowercut, uppercut - 1), axis) sl = [slice(None)] * atmp.ndim sl[axis] = slice(lowercut, uppercut) return np.mean(atmp[tuple(sl)], axis=axis) F_onewayResult = namedtuple('F_onewayResult', ('statistic', 'pvalue')) def f_oneway(*args): """ Performs a 1-way ANOVA. The one-way ANOVA tests the null hypothesis that two or more groups have the same population mean. The test is applied to samples from two or more groups, possibly with differing sizes. Parameters ---------- sample1, sample2, ... : array_like The sample measurements for each group. Returns ------- statistic : float The computed F-value of the test. pvalue : float The associated p-value from the F-distribution. Notes ----- The ANOVA test has important assumptions that must be satisfied in order for the associated p-value to be valid. 1. The samples are independent. 2. Each sample is from a normally distributed population. 3. The population standard deviations of the groups are all equal. This property is known as homoscedasticity. If these assumptions are not true for a given set of data, it may still be possible to use the Kruskal-Wallis H-test (`scipy.stats.kruskal`) although with some loss of power. The algorithm is from Heiman[2], pp.394-7. References ---------- .. [1] R. Lowry, "Concepts and Applications of Inferential Statistics", Chapter 14, 2014, http://vassarstats.net/textbook/ .. [2] G.W. Heiman, "Understanding research methods and statistics: An integrated introduction for psychology", Houghton, Mifflin and Company, 2001. .. [3] G.H. McDonald, "Handbook of Biological Statistics", One-way ANOVA. 
http://www.biostathandbook.com/onewayanova.html Examples -------- >>> import scipy.stats as stats [3]_ Here are some data on a shell measurement (the length of the anterior adductor muscle scar, standardized by dividing by length) in the mussel Mytilus trossulus from five locations: Tillamook, Oregon; Newport, Oregon; Petersburg, Alaska; Magadan, Russia; and Tvarminne, Finland, taken from a much larger data set used in McDonald et al. (1991). >>> tillamook = [0.0571, 0.0813, 0.0831, 0.0976, 0.0817, 0.0859, 0.0735, ... 0.0659, 0.0923, 0.0836] >>> newport = [0.0873, 0.0662, 0.0672, 0.0819, 0.0749, 0.0649, 0.0835, ... 0.0725] >>> petersburg = [0.0974, 0.1352, 0.0817, 0.1016, 0.0968, 0.1064, 0.105] >>> magadan = [0.1033, 0.0915, 0.0781, 0.0685, 0.0677, 0.0697, 0.0764, ... 0.0689] >>> tvarminne = [0.0703, 0.1026, 0.0956, 0.0973, 0.1039, 0.1045] >>> stats.f_oneway(tillamook, newport, petersburg, magadan, tvarminne) (7.1210194716424473, 0.00028122423145345439) """ args = [np.asarray(arg, dtype=float) for arg in args] # ANOVA on N groups, each in its own array num_groups = len(args) alldata = np.concatenate(args) bign = len(alldata) # Determine the mean of the data, and subtract that from all inputs to a # variance (via sum_of_sq / sq_of_sum) calculation. Variance is invariant # to a shift in location, and centering all data around zero vastly # improves numerical stability. offset = alldata.mean() alldata -= offset sstot = _sum_of_squares(alldata) - (_square_of_sums(alldata) / bign) ssbn = 0 for a in args: ssbn += _square_of_sums(a - offset) / len(a) # Naming: variables ending in bn/b are for "between treatments", wn/w are # for "within treatments" ssbn -= _square_of_sums(alldata) / bign sswn = sstot - ssbn dfbn = num_groups - 1 dfwn = bign - num_groups msb = ssbn / dfbn msw = sswn / dfwn f = msb / msw prob = special.fdtrc(dfbn, dfwn, f) # equivalent to stats.f.sf return F_onewayResult(f, prob) class PearsonRConstantInputWarning(RuntimeWarning): """ Warning generated by `pearsonr` when an input is constant. """ def __init__(self, msg=None): if msg is None: msg = ("An input array is constant; the correlation coefficient " "is not defined.") self.args = (msg,) class PearsonRNearConstantInputWarning(RuntimeWarning): """ Warning generated by `pearsonr` when an input is nearly constant. """ def __init__(self, msg=None): if msg is None: msg = ("An input array is nearly constant; the computed " "correlation coefficient may be inaccurate.") self.args = (msg,) def pearsonr(x, y): r""" Pearson correlation coefficient and p-value for testing non-correlation. The Pearson correlation coefficient [1]_ measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. (See Kowalski [3]_ for a discussion of the effects of non-normality of the input on the distribution of the correlation coefficient.) Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases. The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
Parameters ---------- x : (N,) array_like Input y : (N,) array_like Input Returns ------- r : float Pearson's correlation coefficient p-value : float two-tailed p-value Warns ----- PearsonRConstantInputWarning Raised if an input is a constant array. The correlation coefficient is not defined in this case, so ``np.nan`` is returned. PearsonRNearConstantInputWarning Raised if an input is "nearly" constant. The array ``x`` is considered nearly constant if ``norm(x - mean(x)) < 1e-13 * abs(mean(x))``. Numerical errors in the calculation ``x - mean(x)`` in this case might result in an inaccurate calculation of r. See Also -------- spearmanr : Spearman rank-order correlation coefficient. kendalltau : Kendall's tau, a correlation measure for ordinal data. Notes ----- The correlation coefficient is calculated as follows: .. math:: r = \frac{\sum (x - m_x) (y - m_y)} {\sqrt{\sum (x - m_x)^2 \sum (y - m_y)^2}} where :math:`m_x` is the mean of the vector :math:`x` and :math:`m_y` is the mean of the vector :math:`y`. Under the assumption that x and y are drawn from independent normal distributions (so the population correlation coefficient is 0), the probability density function of the sample correlation coefficient r is ([1]_, [2]_):: (1 - r**2)**(n/2 - 2) f(r) = --------------------- B(1/2, n/2 - 1) where n is the number of samples, and B is the beta function. This is sometimes referred to as the exact distribution of r. This is the distribution that is used in `pearsonr` to compute the p-value. The distribution is a beta distribution on the interval [-1, 1], with equal shape parameters a = b = n/2 - 1. In terms of SciPy's implementation of the beta distribution, the distribution of r is:: dist = scipy.stats.beta(n/2 - 1, n/2 - 1, loc=-1, scale=2) The p-value returned by `pearsonr` is a two-sided p-value. For a given sample with correlation coefficient r, the p-value is the probability that abs(r') of a random sample x' and y' drawn from the population with zero correlation would be greater than or equal to abs(r). In terms of the object ``dist`` shown above, the p-value for a given r and length n can be computed as:: p = 2*dist.cdf(-abs(r)) When n is 2, the above continuous distribution is not well-defined. One can interpret the limit of the beta distribution as the shape parameters a and b approach a = b = 0 as a discrete distribution with equal probability masses at r = 1 and r = -1. More directly, one can observe that, given the data x = [x1, x2] and y = [y1, y2], and assuming x1 != x2 and y1 != y2, the only possible values for r are 1 and -1. Because abs(r') for any sample x' and y' with length 2 will be 1, the two-sided p-value for a sample of length 2 is always 1. References ---------- .. [1] "Pearson correlation coefficient", Wikipedia, https://en.wikipedia.org/wiki/Pearson_correlation_coefficient .. [2] Student, "Probable error of a correlation coefficient", Biometrika, Volume 6, Issue 2-3, 1 September 1908, pp. 302-310. .. [3] C. J. Kowalski, "On the Effects of Non-Normality on the Distribution of the Sample Product-Moment Correlation Coefficient" Journal of the Royal Statistical Society. Series C (Applied Statistics), Vol. 21, No. 1 (1972), pp. 1-12. 
Examples -------- >>> from scipy import stats >>> a = np.array([0, 0, 0, 1, 1, 1, 1]) >>> b = np.arange(7) >>> stats.pearsonr(a, b) (0.8660254037844386, 0.011724811003954649) >>> stats.pearsonr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4]) (-0.7426106572325057, 0.1505558088534455) """ n = len(x) if n != len(y): raise ValueError('x and y must have the same length.') if n < 2: raise ValueError('x and y must have length at least 2.') x = np.asarray(x) y = np.asarray(y) # If an input is constant, the correlation coefficient is not defined. if (x == x[0]).all() or (y == y[0]).all(): warnings.warn(PearsonRConstantInputWarning()) return np.nan, np.nan # dtype is the data type for the calculations. This expression ensures # that the data type is at least 64 bit floating point. It might have # more precision if the input is, for example, np.longdouble. dtype = type(1.0 + x[0] + y[0]) if n == 2: return dtype(np.sign(x[1] - x[0])*np.sign(y[1] - y[0])), 1.0 xmean = x.mean(dtype=dtype) ymean = y.mean(dtype=dtype) # By using `astype(dtype)`, we ensure that the intermediate calculations # use at least 64 bit floating point. xm = x.astype(dtype) - xmean ym = y.astype(dtype) - ymean # Unlike np.linalg.norm or the expression sqrt((xm*xm).sum()), # scipy.linalg.norm(xm) does not overflow if xm is, for example, # [-5e210, 5e210, 3e200, -3e200] normxm = linalg.norm(xm) normym = linalg.norm(ym) threshold = 1e-13 if normxm < threshold*abs(xmean) or normym < threshold*abs(ymean): # If all the values in x (likewise y) are very close to the mean, # the loss of precision that occurs in the subtraction xm = x - xmean # might result in large errors in r. warnings.warn(PearsonRNearConstantInputWarning()) r = np.dot(xm/normxm, ym/normym) # Presumably, if abs(r) > 1, then it is only some small artifact of # floating point arithmetic. r = max(min(r, 1.0), -1.0) # As explained in the docstring, the p-value can be computed as # p = 2*dist.cdf(-abs(r)) # where dist is the beta distribution on [-1, 1] with shape parameters # a = b = n/2 - 1. `special.btdtr` is the CDF for the beta distribution # on [0, 1]. To use it, we make the transformation x = (r + 1)/2; the # shape parameters do not change. Then -abs(r) used in `cdf(-abs(r))` # becomes x = (-abs(r) + 1)/2 = 0.5*(1 - abs(r)). (r is cast to float64 # to avoid a TypeError raised by btdtr when r is higher precision.) ab = n/2 - 1 prob = 2*special.btdtr(ab, ab, 0.5*(1 - abs(np.float64(r)))) return r, prob def fisher_exact(table, alternative='two-sided'): """Performs a Fisher exact test on a 2x2 contingency table. Parameters ---------- table : array_like of ints A 2x2 contingency table. Elements should be non-negative integers. alternative : {'two-sided', 'less', 'greater'}, optional Which alternative hypothesis to the null hypothesis the test uses. Default is 'two-sided'. Returns ------- oddsratio : float This is prior odds ratio and not a posterior estimate. p_value : float P-value, the probability of obtaining a distribution at least as extreme as the one that was actually observed, assuming that the null hypothesis is true. See Also -------- chi2_contingency : Chi-square test of independence of variables in a contingency table. Notes ----- The calculated odds ratio is different from the one R uses. This scipy implementation returns the (more common) "unconditional Maximum Likelihood Estimate", while R uses the "conditional Maximum Likelihood Estimate". For tables with large numbers, the (inexact) chi-square test implemented in the function `chi2_contingency` can also be used. 
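For a 2x2 table ``[[a, b], [c, d]]`` with ``b > 0`` and ``c > 0``, the returned odds ratio is simply ``(a*d) / (b*c)``; for the whale/shark table in the example below this is ``(8*5) / (2*1) = 20``.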
Examples -------- Say we spend a few days counting whales and sharks in the Atlantic and Indian oceans. In the Atlantic ocean we find 8 whales and 1 shark, in the Indian ocean 2 whales and 5 sharks. Then our contingency table is:: Atlantic Indian whales 8 2 sharks 1 5 We use this table to find the p-value: >>> import scipy.stats as stats >>> oddsratio, pvalue = stats.fisher_exact([[8, 2], [1, 5]]) >>> pvalue 0.0349... The probability that we would observe this or an even more imbalanced ratio by chance is about 3.5%. A commonly used significance level is 5%--if we adopt that, we can therefore conclude that our observed imbalance is statistically significant; whales prefer the Atlantic while sharks prefer the Indian ocean. """ hypergeom = distributions.hypergeom c = np.asarray(table, dtype=np.int64) # int32 is not enough for the algorithm if not c.shape == (2, 2): raise ValueError("The input `table` must be of shape (2, 2).") if np.any(c < 0): raise ValueError("All values in `table` must be nonnegative.") if 0 in c.sum(axis=0) or 0 in c.sum(axis=1): # If both values in a row or column are zero, the p-value is 1 and # the odds ratio is NaN. return np.nan, 1.0 if c[1, 0] > 0 and c[0, 1] > 0: oddsratio = c[0, 0] * c[1, 1] / (c[1, 0] * c[0, 1]) else: oddsratio = np.inf n1 = c[0, 0] + c[0, 1] n2 = c[1, 0] + c[1, 1] n = c[0, 0] + c[1, 0] def binary_search(n, n1, n2, side): """Binary search for where to begin lower/upper halves in two-sided test. """ if side == "upper": minval = mode maxval = n else: minval = 0 maxval = mode guess = -1 while maxval - minval > 1: if maxval == minval + 1 and guess == minval: guess = maxval else: guess = (maxval + minval) // 2 pguess = hypergeom.pmf(guess, n1 + n2, n1, n) if side == "upper": ng = guess - 1 else: ng = guess + 1 if pguess <= pexact < hypergeom.pmf(ng, n1 + n2, n1, n): break elif pguess < pexact: maxval = guess else: minval = guess if guess == -1: guess = minval if side == "upper": while guess > 0 and hypergeom.pmf(guess, n1 + n2, n1, n) < pexact * epsilon: guess -= 1 while hypergeom.pmf(guess, n1 + n2, n1, n) > pexact / epsilon: guess += 1 else: while hypergeom.pmf(guess, n1 + n2, n1, n) < pexact * epsilon: guess += 1 while guess > 0 and hypergeom.pmf(guess, n1 + n2, n1, n) > pexact / epsilon: guess -= 1 return guess if alternative == 'less': pvalue = hypergeom.cdf(c[0, 0], n1 + n2, n1, n) elif alternative == 'greater': # Same formula as the 'less' case, but with the second column. pvalue = hypergeom.cdf(c[0, 1], n1 + n2, n1, c[0, 1] + c[1, 1]) elif alternative == 'two-sided': mode = int((n + 1) * (n1 + 1) / (n1 + n2 + 2)) pexact = hypergeom.pmf(c[0, 0], n1 + n2, n1, n) pmode = hypergeom.pmf(mode, n1 + n2, n1, n) epsilon = 1 - 1e-4 if np.abs(pexact - pmode) / np.maximum(pexact, pmode) <= 1 - epsilon: return oddsratio, 1. 
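# Otherwise, add to the tail containing the observed count the mass of the opposite tail, located with binary_search over the hypergeometric pmf.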
elif c[0, 0] < mode: plower = hypergeom.cdf(c[0, 0], n1 + n2, n1, n) if hypergeom.pmf(n, n1 + n2, n1, n) > pexact / epsilon: return oddsratio, plower guess = binary_search(n, n1, n2, "upper") pvalue = plower + hypergeom.sf(guess - 1, n1 + n2, n1, n) else: pupper = hypergeom.sf(c[0, 0] - 1, n1 + n2, n1, n) if hypergeom.pmf(0, n1 + n2, n1, n) > pexact / epsilon: return oddsratio, pupper guess = binary_search(n, n1, n2, "lower") pvalue = pupper + hypergeom.cdf(guess, n1 + n2, n1, n) else: msg = "`alternative` should be one of {'two-sided', 'less', 'greater'}" raise ValueError(msg) pvalue = min(pvalue, 1.0) return oddsratio, pvalue SpearmanrResult = namedtuple('SpearmanrResult', ('correlation', 'pvalue')) def spearmanr(a, b=None, axis=0, nan_policy='propagate'): """ Calculate a Spearman rank-order correlation coefficient and the p-value to test for non-correlation. The Spearman correlation is a nonparametric measure of the monotonicity of the relationship between two datasets. Unlike the Pearson correlation, the Spearman correlation does not assume that both datasets are normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact monotonic relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases. The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Spearman correlation at least as extreme as the one computed from these datasets. The p-values are not entirely reliable but are probably reasonable for datasets larger than 500 or so. Parameters ---------- a, b : 1D or 2D array_like, b is optional One or two 1-D or 2-D arrays containing multiple variables and observations. When these are 1-D, each represents a vector of observations of a single variable. For the behavior in the 2-D case, see under ``axis``, below. Both arrays need to have the same length in the ``axis`` dimension. axis : int or None, optional If axis=0 (default), then each column represents a variable, with observations in the rows. If axis=1, the relationship is transposed: each row represents a variable, while the columns contain observations. If axis=None, then both arrays will be raveled. nan_policy : {'propagate', 'raise', 'omit'}, optional Defines how to handle when input contains nan. 'propagate' returns nan, 'raise' throws an error, 'omit' performs the calculations ignoring nan values. Default is 'propagate'. Returns ------- correlation : float or ndarray (2-D square) Spearman correlation matrix or correlation coefficient (if only 2 variables are given as parameters. Correlation matrix is square with length equal to total number of variables (columns or rows) in ``a`` and ``b`` combined. pvalue : float The two-sided p-value for a hypothesis test whose null hypothesis is that two sets of data are uncorrelated, has same dimension as rho. References ---------- .. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard Probability and Statistics Tables and Formulae. Chapman & Hall: New York. 2000. 
Section 14.7 Examples -------- >>> from scipy import stats >>> stats.spearmanr([1,2,3,4,5], [5,6,7,8,7]) (0.82078268166812329, 0.088587005313543798) >>> np.random.seed(1234321) >>> x2n = np.random.randn(100, 2) >>> y2n = np.random.randn(100, 2) >>> stats.spearmanr(x2n) (0.059969996999699973, 0.55338590803773591) >>> stats.spearmanr(x2n[:,0], x2n[:,1]) (0.059969996999699973, 0.55338590803773591) >>> rho, pval = stats.spearmanr(x2n, y2n) >>> rho array([[ 1. , 0.05997 , 0.18569457, 0.06258626], [ 0.05997 , 1. , 0.110003 , 0.02534653], [ 0.18569457, 0.110003 , 1. , 0.03488749], [ 0.06258626, 0.02534653, 0.03488749, 1. ]]) >>> pval array([[ 0. , 0.55338591, 0.06435364, 0.53617935], [ 0.55338591, 0. , 0.27592895, 0.80234077], [ 0.06435364, 0.27592895, 0. , 0.73039992], [ 0.53617935, 0.80234077, 0.73039992, 0. ]]) >>> rho, pval = stats.spearmanr(x2n.T, y2n.T, axis=1) >>> rho array([[ 1. , 0.05997 , 0.18569457, 0.06258626], [ 0.05997 , 1. , 0.110003 , 0.02534653], [ 0.18569457, 0.110003 , 1. , 0.03488749], [ 0.06258626, 0.02534653, 0.03488749, 1. ]]) >>> stats.spearmanr(x2n, y2n, axis=None) (0.10816770419260482, 0.1273562188027364) >>> stats.spearmanr(x2n.ravel(), y2n.ravel()) (0.10816770419260482, 0.1273562188027364) >>> xint = np.random.randint(10, size=(100, 2)) >>> stats.spearmanr(xint) (0.052760927029710199, 0.60213045837062351) """ a, axisout = _chk_asarray(a, axis) if a.ndim > 2: raise ValueError("spearmanr only handles 1-D or 2-D arrays") if b is None: if a.ndim < 2: raise ValueError("`spearmanr` needs at least 2 variables to compare") else: # Concatenate a and b, so that we now only have to handle the case # of a 2-D `a`. b, _ = _chk_asarray(b, axis) if axisout == 0: a = np.column_stack((a, b)) else: a = np.row_stack((a, b)) n_vars = a.shape[1 - axisout] n_obs = a.shape[axisout] if n_obs <= 1: # Handle empty arrays or single observations. return SpearmanrResult(np.nan, np.nan) a_contains_nan, nan_policy = _contains_nan(a, nan_policy) variable_has_nan = np.zeros(n_vars, dtype=bool) if a_contains_nan: if nan_policy == 'omit': return mstats_basic.spearmanr(a, axis=axis, nan_policy=nan_policy) elif nan_policy == 'propagate': if a.ndim == 1 or n_vars <= 2: return SpearmanrResult(np.nan, np.nan) else: # Keep track of variables with NaNs, set the outputs to NaN # only for those variables variable_has_nan = np.isnan(a).sum(axis=axisout) a_ranked = np.apply_along_axis(rankdata, axisout, a) rs = np.corrcoef(a_ranked, rowvar=axisout) dof = n_obs - 2 # degrees of freedom # rs can have elements equal to 1, so avoid zero division warnings olderr = np.seterr(divide='ignore') try: # clip the small negative values possibly caused by rounding # errors before taking the square root t = rs * np.sqrt((dof/((rs+1.0)*(1.0-rs))).clip(0)) finally: np.seterr(**olderr) prob = 2 * distributions.t.sf(np.abs(t), dof) # For backwards compatibility, return scalars when comparing 2 columns if rs.shape == (2, 2): return SpearmanrResult(rs[1, 0], prob[1, 0]) else: rs[variable_has_nan, :] = np.nan rs[:, variable_has_nan] = np.nan return SpearmanrResult(rs, prob) PointbiserialrResult = namedtuple('PointbiserialrResult', ('correlation', 'pvalue')) def pointbiserialr(x, y): r""" Calculate a point biserial correlation coefficient and its p-value. The point biserial correlation is used to measure the relationship between a binary variable, x, and a continuous variable, y. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. 
Correlations of -1 or +1 imply a determinative relationship. This function uses a shortcut formula but produces the same result as `pearsonr`. Parameters ---------- x : array_like of bools Input array. y : array_like Input array. Returns ------- correlation : float R value pvalue : float 2-tailed p-value Notes ----- `pointbiserialr` uses a t-test with ``n-2`` degrees of freedom. It is equivalent to `pearsonr`. The value of the point-biserial correlation can be calculated from: .. math:: r_{pb} = \frac{\overline{Y_{1}} - \overline{Y_{0}}}{s_{y}}\sqrt{\frac{N_{0} N_{1}}{N (N - 1)}} Where :math:`Y_{0}` and :math:`Y_{1}` are means of the metric observations coded 0 and 1 respectively; :math:`N_{0}` and :math:`N_{1}` are number of observations coded 0 and 1 respectively; :math:`N` is the total number of observations and :math:`s_{y}` is the standard deviation of all the metric observations. A value of :math:`r_{pb}` that is significantly different from zero is completely equivalent to a significant difference in means between the two groups. Thus, an independent groups t-test with :math:`N-2` degrees of freedom may be used to test whether :math:`r_{pb}` is nonzero. The relation between the t-statistic for comparing two independent groups and :math:`r_{pb}` is given by: .. math:: t = \sqrt{N - 2}\frac{r_{pb}}{\sqrt{1 - r^{2}_{pb}}} References ---------- .. [1] J. Lev, "The Point Biserial Coefficient of Correlation", Ann. Math. Statist., Vol. 20, no. 1, pp. 125-126, 1949. .. [2] R.F. Tate, "Correlation Between a Discrete and a Continuous Variable. Point-Biserial Correlation.", Ann. Math. Statist., Vol. 25, no. 3, pp. 603-607, 1954. .. [3] D. Kornbrot "Point Biserial Correlation", In Wiley StatsRef: Statistics Reference Online (eds N. Balakrishnan, et al.), 2014. https://doi.org/10.1002/9781118445112.stat06227 Examples -------- >>> from scipy import stats >>> a = np.array([0, 0, 0, 1, 1, 1, 1]) >>> b = np.arange(7) >>> stats.pointbiserialr(a, b) (0.8660254037844386, 0.011724811003954652) >>> stats.pearsonr(a, b) (0.86602540378443871, 0.011724811003954626) >>> np.corrcoef(a, b) array([[ 1. , 0.8660254], [ 0.8660254, 1. ]]) """ rpb, prob = pearsonr(x, y) return PointbiserialrResult(rpb, prob) KendalltauResult = namedtuple('KendalltauResult', ('correlation', 'pvalue')) def kendalltau(x, y, initial_lexsort=None, nan_policy='propagate', method='auto'): """ Calculate Kendall's tau, a correlation measure for ordinal data. Kendall's tau is a measure of the correspondence between two rankings. Values close to 1 indicate strong agreement, values close to -1 indicate strong disagreement. This is the 1945 "tau-b" version of Kendall's tau [2]_, which can account for ties and which reduces to the 1938 "tau-a" version [1]_ in absence of ties. Parameters ---------- x, y : array_like Arrays of rankings, of the same shape. If arrays are not 1-D, they will be flattened to 1-D. initial_lexsort : bool, optional Unused (deprecated). nan_policy : {'propagate', 'raise', 'omit'}, optional Defines how to handle when input contains nan. 'propagate' returns nan, 'raise' throws an error, 'omit' performs the calculations ignoring nan values. Default is 'propagate'. Note that if the input contains nan, 'omit' delegates to mstats_basic.kendalltau(), which has a different implementation. method : {'auto', 'asymptotic', 'exact'}, optional Defines which method is used to calculate the p-value [5]_. 'asymptotic' uses a normal approximation valid for large samples.
'exact' computes the exact p-value, but can only be used if no ties are present. 'auto' is the default and selects the appropriate method based on a trade-off between speed and accuracy. Returns ------- correlation : float The tau statistic. pvalue : float The two-sided p-value for a hypothesis test whose null hypothesis is an absence of association, tau = 0. See also -------- spearmanr : Calculates a Spearman rank-order correlation coefficient. theilslopes : Computes the Theil-Sen estimator for a set of points (x, y). weightedtau : Computes a weighted version of Kendall's tau. Notes ----- The definition of Kendall's tau that is used is [2]_:: tau = (P - Q) / sqrt((P + Q + T) * (P + Q + U)) where P is the number of concordant pairs, Q the number of discordant pairs, T the number of ties only in `x`, and U the number of ties only in `y`. If a tie occurs for the same pair in both `x` and `y`, it is not added to either T or U. References ---------- .. [1] Maurice G. Kendall, "A New Measure of Rank Correlation", Biometrika Vol. 30, No. 1/2, pp. 81-93, 1938. .. [2] Maurice G. Kendall, "The treatment of ties in ranking problems", Biometrika Vol. 33, No. 3, pp. 239-251. 1945. .. [3] Gottfried E. Noether, "Elements of Nonparametric Statistics", John Wiley & Sons, 1967. .. [4] Peter M. Fenwick, "A new data structure for cumulative frequency tables", Software: Practice and Experience, Vol. 24, No. 3, pp. 327-336, 1994. .. [5] Maurice G. Kendall, "Rank Correlation Methods" (4th Edition), Charles Griffin & Co., 1970. Examples -------- >>> from scipy import stats >>> x1 = [12, 2, 1, 12, 2] >>> x2 = [1, 4, 7, 1, 0] >>> tau, p_value = stats.kendalltau(x1, x2) >>> tau -0.47140452079103173 >>> p_value 0.2827454599327748 """ x = np.asarray(x).ravel() y = np.asarray(y).ravel() if x.size != y.size: raise ValueError("All inputs to `kendalltau` must be of the same size, " "found x-size %s and y-size %s" % (x.size, y.size)) elif not x.size or not y.size: return KendalltauResult(np.nan, np.nan) # Return NaN if arrays are empty # check both x and y cnx, npx = _contains_nan(x, nan_policy) cny, npy = _contains_nan(y, nan_policy) contains_nan = cnx or cny if npx == 'omit' or npy == 'omit': nan_policy = 'omit' if contains_nan and nan_policy == 'propagate': return KendalltauResult(np.nan, np.nan) elif contains_nan and nan_policy == 'omit': x = ma.masked_invalid(x) y = ma.masked_invalid(y) return mstats_basic.kendalltau(x, y, method=method) if initial_lexsort is not None: # deprecate to drop! warnings.warn('"initial_lexsort" is gone!') def count_rank_tie(ranks): cnt = np.bincount(ranks).astype('int64', copy=False) cnt = cnt[cnt > 1] return ((cnt * (cnt - 1) // 2).sum(), (cnt * (cnt - 1.) * (cnt - 2)).sum(), (cnt * (cnt - 1.) 
* (2*cnt + 5)).sum()) size = x.size perm = np.argsort(y) # sort on y and convert y to dense ranks x, y = x[perm], y[perm] y = np.r_[True, y[1:] != y[:-1]].cumsum(dtype=np.intp) # stable sort on x and convert x to dense ranks perm = np.argsort(x, kind='mergesort') x, y = x[perm], y[perm] x = np.r_[True, x[1:] != x[:-1]].cumsum(dtype=np.intp) dis = _kendall_dis(x, y) # discordant pairs obs = np.r_[True, (x[1:] != x[:-1]) | (y[1:] != y[:-1]), True] cnt = np.diff(np.nonzero(obs)[0]).astype('int64', copy=False) ntie = (cnt * (cnt - 1) // 2).sum() # joint ties xtie, x0, x1 = count_rank_tie(x) # ties in x, stats ytie, y0, y1 = count_rank_tie(y) # ties in y, stats tot = (size * (size - 1)) // 2 if xtie == tot or ytie == tot: return KendalltauResult(np.nan, np.nan) # Note that tot = con + dis + (xtie - ntie) + (ytie - ntie) + ntie # = con + dis + xtie + ytie - ntie con_minus_dis = tot - xtie - ytie + ntie - 2 * dis tau = con_minus_dis / np.sqrt(tot - xtie) / np.sqrt(tot - ytie) # Limit range to fix computational errors tau = min(1., max(-1., tau)) if method == 'exact' and (xtie != 0 or ytie != 0): raise ValueError("Ties found, exact method cannot be used.") if method == 'auto': if (xtie == 0 and ytie == 0) and (size <= 33 or min(dis, tot-dis) <= 1): method = 'exact' else: method = 'asymptotic' if xtie == 0 and ytie == 0 and method == 'exact': # Exact p-value, see Maurice G. Kendall, "Rank Correlation Methods" (4th Edition), Charles Griffin & Co., 1970. c = min(dis, tot-dis) if size <= 0: raise ValueError elif c < 0 or 2*c > size*(size-1): raise ValueError elif size == 1: pvalue = 1.0 elif size == 2: pvalue = 1.0 elif c == 0: pvalue = 2.0/math.factorial(size) if size < 171 else 0.0 elif c == 1: pvalue = 2.0/math.factorial(size-1) if (size-1) < 171 else 0.0 else: new = [0.0]*(c+1) new[0] = 1.0 new[1] = 1.0 for j in range(3,size+1): old = new[:] for k in range(1,min(j,c+1)): new[k] += new[k-1] for k in range(j,c+1): new[k] += new[k-1] - old[k-j] pvalue = 2.0*sum(new)/math.factorial(size) if size < 171 else 0.0 elif method == 'asymptotic': # con_minus_dis is approx normally distributed with this variance [3]_ var = (size * (size - 1) * (2.*size + 5) - x1 - y1) / 18. + ( 2. * xtie * ytie) / (size * (size - 1)) + x0 * y0 / (9. * size * (size - 1) * (size - 2)) pvalue = special.erfc(np.abs(con_minus_dis) / np.sqrt(var) / np.sqrt(2)) else: raise ValueError("Unknown method "+str(method)+" specified, please use auto, exact or asymptotic.") return KendalltauResult(tau, pvalue) WeightedTauResult = namedtuple('WeightedTauResult', ('correlation', 'pvalue')) def weightedtau(x, y, rank=True, weigher=None, additive=True): r""" Compute a weighted version of Kendall's :math:`\tau`. The weighted :math:`\tau` is a weighted version of Kendall's :math:`\tau` in which exchanges of high weight are more influential than exchanges of low weight. The default parameters compute the additive hyperbolic version of the index, :math:`\tau_\mathrm h`, which has been shown to provide the best balance between important and unimportant elements [1]_. The weighting is defined by means of a rank array, which assigns a nonnegative rank to each element, and a weigher function, which assigns a weight based from the rank to each element. The weight of an exchange is then the sum or the product of the weights of the ranks of the exchanged elements. The default parameters compute :math:`\tau_\mathrm h`: an exchange between elements with rank :math:`r` and :math:`s` (starting from zero) has weight :math:`1/(r+1) + 1/(s+1)`. 
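For instance, under this default weighing an exchange between the two most important elements (ranks 0 and 1) has weight ``1/1 + 1/2 = 1.5``, whereas an exchange between the elements of rank 9 and rank 10 has weight ``1/10 + 1/11``, about ``0.19``.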
Specifying a rank array is meaningful only if you have in mind an external criterion of importance. If, as it usually happens, you do not have in mind a specific rank, the weighted :math:`\tau` is defined by averaging the values obtained using the decreasing lexicographical rank by (`x`, `y`) and by (`y`, `x`). This is the behavior with default parameters. Note that if you are computing the weighted :math:`\tau` on arrays of ranks, rather than of scores (i.e., a larger value implies a lower rank) you must negate the ranks, so that elements of higher rank are associated with a larger value. Parameters ---------- x, y : array_like Arrays of scores, of the same shape. If arrays are not 1-D, they will be flattened to 1-D. rank: array_like of ints or bool, optional A nonnegative rank assigned to each element. If it is None, the decreasing lexicographical rank by (`x`, `y`) will be used: elements of higher rank will be those with larger `x`-values, using `y`-values to break ties (in particular, swapping `x` and `y` will give a different result). If it is False, the element indices will be used directly as ranks. The default is True, in which case this function returns the average of the values obtained using the decreasing lexicographical rank by (`x`, `y`) and by (`y`, `x`). weigher : callable, optional The weigher function. Must map nonnegative integers (zero representing the most important element) to a nonnegative weight. The default, None, provides hyperbolic weighing, that is, rank :math:`r` is mapped to weight :math:`1/(r+1)`. additive : bool, optional If True, the weight of an exchange is computed by adding the weights of the ranks of the exchanged elements; otherwise, the weights are multiplied. The default is True. Returns ------- correlation : float The weighted :math:`\tau` correlation index. pvalue : float Presently ``np.nan``, as the null statistics is unknown (even in the additive hyperbolic case). See also -------- kendalltau : Calculates Kendall's tau. spearmanr : Calculates a Spearman rank-order correlation coefficient. theilslopes : Computes the Theil-Sen estimator for a set of points (x, y). Notes ----- This function uses an :math:`O(n \log n)`, mergesort-based algorithm [1]_ that is a weighted extension of Knight's algorithm for Kendall's :math:`\tau` [2]_. It can compute Shieh's weighted :math:`\tau` [3]_ between rankings without ties (i.e., permutations) by setting `additive` and `rank` to False, as the definition given in [1]_ is a generalization of Shieh's. NaNs are considered the smallest possible score. .. versionadded:: 0.19.0 References ---------- .. [1] Sebastiano Vigna, "A weighted correlation index for rankings with ties", Proceedings of the 24th international conference on World Wide Web, pp. 1166-1176, ACM, 2015. .. [2] W.R. Knight, "A Computer Method for Calculating Kendall's Tau with Ungrouped Data", Journal of the American Statistical Association, Vol. 61, No. 314, Part 1, pp. 436-439, 1966. .. [3] Grace S. Shieh. "A weighted Kendall's tau statistic", Statistics & Probability Letters, Vol. 39, No. 1, pp. 17-24, 1998. 
Examples -------- >>> from scipy import stats >>> x = [12, 2, 1, 12, 2] >>> y = [1, 4, 7, 1, 0] >>> tau, p_value = stats.weightedtau(x, y) >>> tau -0.56694968153682723 >>> p_value nan >>> tau, p_value = stats.weightedtau(x, y, additive=False) >>> tau -0.62205716951801038 NaNs are considered the smallest possible score: >>> x = [12, 2, 1, 12, 2] >>> y = [1, 4, 7, 1, np.nan] >>> tau, _ = stats.weightedtau(x, y) >>> tau -0.56694968153682723 This is exactly Kendall's tau: >>> x = [12, 2, 1, 12, 2] >>> y = [1, 4, 7, 1, 0] >>> tau, _ = stats.weightedtau(x, y, weigher=lambda x: 1) >>> tau -0.47140452079103173 >>> x = [12, 2, 1, 12, 2] >>> y = [1, 4, 7, 1, 0] >>> stats.weightedtau(x, y, rank=None) WeightedTauResult(correlation=-0.4157652301037516, pvalue=nan) >>> stats.weightedtau(y, x, rank=None) WeightedTauResult(correlation=-0.7181341329699028, pvalue=nan) """ x = np.asarray(x).ravel() y = np.asarray(y).ravel() if x.size != y.size: raise ValueError("All inputs to `weightedtau` must be of the same size, " "found x-size %s and y-size %s" % (x.size, y.size)) if not x.size: return WeightedTauResult(np.nan, np.nan) # Return NaN if arrays are empty # If there are NaNs we apply _toint64() if np.isnan(np.sum(x)): x = _toint64(x) if np.isnan(np.sum(y)): y = _toint64(y) # Reduce unsupported types to ranks if x.dtype != y.dtype: if x.dtype != np.int64: x = _toint64(x) if y.dtype != np.int64: y = _toint64(y) else: if x.dtype not in (np.int32, np.int64, np.float32, np.float64): x = _toint64(x) y = _toint64(y) if rank is True: return WeightedTauResult(( _weightedrankedtau(x, y, None, weigher, additive) + _weightedrankedtau(y, x, None, weigher, additive) ) / 2, np.nan) if rank is False: rank = np.arange(x.size, dtype=np.intp) elif rank is not None: rank = np.asarray(rank).ravel() if rank.size != x.size: raise ValueError("All inputs to `weightedtau` must be of the same size, " "found x-size %s and rank-size %s" % (x.size, rank.size)) return WeightedTauResult(_weightedrankedtau(x, y, rank, weigher, additive), np.nan) ##################################### # INFERENTIAL STATISTICS # ##################################### Ttest_1sampResult = namedtuple('Ttest_1sampResult', ('statistic', 'pvalue')) def ttest_1samp(a, popmean, axis=0, nan_policy='propagate'): """ Calculate the T-test for the mean of ONE group of scores. This is a two-sided test for the null hypothesis that the expected value (mean) of a sample of independent observations `a` is equal to the given population mean, `popmean`. Parameters ---------- a : array_like sample observation popmean : float or array_like expected value in null hypothesis. If array_like, then it must have the same shape as `a` excluding the axis dimension axis : int or None, optional Axis along which to compute test. If None, compute over the whole array `a`. nan_policy : {'propagate', 'raise', 'omit'}, optional Defines how to handle when input contains nan. 'propagate' returns nan, 'raise' throws an error, 'omit' performs the calculations ignoring nan values. Default is 'propagate'. Returns ------- statistic : float or array t-statistic pvalue : float or array two-tailed p-value Examples -------- >>> from scipy import stats >>> np.random.seed(7654567) # fix seed to get the same result >>> rvs = stats.norm.rvs(loc=5, scale=10, size=(50,2)) Test if mean of random sample is equal to true mean, and different mean. We reject the null hypothesis in the second case and don't reject it in the first case.
>>> stats.ttest_1samp(rvs,5.0) (array([-0.68014479, -0.04323899]), array([ 0.49961383, 0.96568674])) >>> stats.ttest_1samp(rvs,0.0) (array([ 2.77025808, 4.11038784]), array([ 0.00789095, 0.00014999])) Examples using axis and non-scalar dimension for population mean. >>> stats.ttest_1samp(rvs,[5.0,0.0]) (array([-0.68014479, 4.11038784]), array([ 4.99613833e-01, 1.49986458e-04])) >>> stats.ttest_1samp(rvs.T,[5.0,0.0],axis=1) (array([-0.68014479, 4.11038784]), array([ 4.99613833e-01, 1.49986458e-04])) >>> stats.ttest_1samp(rvs,[[5.0],[0.0]]) (array([[-0.68014479, -0.04323899], [ 2.77025808, 4.11038784]]), array([[ 4.99613833e-01, 9.65686743e-01], [ 7.89094663e-03, 1.49986458e-04]])) """ a, axis = _chk_asarray(a, axis) contains_nan, nan_policy = _contains_nan(a, nan_policy) if contains_nan and nan_policy == 'omit': a = ma.masked_invalid(a) return mstats_basic.ttest_1samp(a, popmean, axis) n = a.shape[axis] df = n - 1 d = np.mean(a, axis) - popmean v = np.var(a, axis, ddof=1) denom = np.sqrt(v / n) with np.errstate(divide='ignore', invalid='ignore'): t = np.divide(d, denom) t, prob = _ttest_finish(df, t) return Ttest_1sampResult(t, prob) def _ttest_finish(df, t): """Common code between all 3 t-test functions.""" prob = distributions.t.sf(np.abs(t), df) * 2 # use np.abs to get upper tail if t.ndim == 0: t = t[()] return t, prob def _ttest_ind_from_stats(mean1, mean2, denom, df): d = mean1 - mean2 with np.errstate(divide='ignore', invalid='ignore'): t = np.divide(d, denom) t, prob = _ttest_finish(df, t) return (t, prob) def _unequal_var_ttest_denom(v1, n1, v2, n2): vn1 = v1 / n1 vn2 = v2 / n2 with np.errstate(divide='ignore', invalid='ignore'): df = (vn1 + vn2)**2 / (vn1**2 / (n1 - 1) + vn2**2 / (n2 - 1)) # If df is undefined, variances are zero (assumes n1 > 0 & n2 > 0). # Hence it doesn't matter what df is as long as it's not NaN. df = np.where(np.isnan(df), 1, df) denom = np.sqrt(vn1 + vn2) return df, denom def _equal_var_ttest_denom(v1, n1, v2, n2): df = n1 + n2 - 2.0 svar = ((n1 - 1) * v1 + (n2 - 1) * v2) / df denom = np.sqrt(svar * (1.0 / n1 + 1.0 / n2)) return df, denom Ttest_indResult = namedtuple('Ttest_indResult', ('statistic', 'pvalue')) def ttest_ind_from_stats(mean1, std1, nobs1, mean2, std2, nobs2, equal_var=True): r""" T-test for means of two independent samples from descriptive statistics. This is a two-sided test for the null hypothesis that two independent samples have identical average (expected) values. Parameters ---------- mean1 : array_like The mean(s) of sample 1. std1 : array_like The standard deviation(s) of sample 1. nobs1 : array_like The number(s) of observations of sample 1. mean2 : array_like The mean(s) of sample 2 std2 : array_like The standard deviations(s) of sample 2. nobs2 : array_like The number(s) of observations of sample 2. equal_var : bool, optional If True (default), perform a standard independent 2 sample test that assumes equal population variances [1]_. If False, perform Welch's t-test, which does not assume equal population variance [2]_. Returns ------- statistic : float or array The calculated t-statistics pvalue : float or array The two-tailed p-value. See Also -------- scipy.stats.ttest_ind Notes ----- .. versionadded:: 0.16.0 References ---------- .. [1] https://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test .. 
[2] https://en.wikipedia.org/wiki/Welch%27s_t-test Examples -------- Suppose we have the summary data for two samples, as follows:: Sample Sample Size Mean Variance Sample 1 13 15.0 87.5 Sample 2 11 12.0 39.0 Apply the t-test to this data (with the assumption that the population variances are equal): >>> from scipy.stats import ttest_ind_from_stats >>> ttest_ind_from_stats(mean1=15.0, std1=np.sqrt(87.5), nobs1=13, ... mean2=12.0, std2=np.sqrt(39.0), nobs2=11) Ttest_indResult(statistic=0.9051358093310269, pvalue=0.3751996797581487) For comparison, here is the data from which those summary statistics were taken. With this data, we can compute the same result using `scipy.stats.ttest_ind`: >>> a = np.array([1, 3, 4, 6, 11, 13, 15, 19, 22, 24, 25, 26, 26]) >>> b = np.array([2, 4, 6, 9, 11, 13, 14, 15, 18, 19, 21]) >>> from scipy.stats import ttest_ind >>> ttest_ind(a, b) Ttest_indResult(statistic=0.905135809331027, pvalue=0.3751996797581486) Suppose we instead have binary data and would like to apply a t-test to compare the proportion of 1s in two independent groups:: Number of Sample Sample Size ones Mean Variance Sample 1 150 30 0.2 0.16 Sample 2 200 45 0.225 0.174375 The sample mean :math:`\hat{p}` is the proportion of ones in the sample and the variance for a binary observation is estimated by :math:`\hat{p}(1-\hat{p})`. >>> ttest_ind_from_stats(mean1=0.2, std1=np.sqrt(0.16), nobs1=150, ... mean2=0.225, std2=np.sqrt(0.17437), nobs2=200) Ttest_indResult(statistic=-0.564327545549774, pvalue=0.5728947691244874) For comparison, we could compute the t statistic and p-value using arrays of 0s and 1s and `scipy.stat.ttest_ind`, as above. >>> group1 = np.array([1]*30 + [0]*(150-30)) >>> group2 = np.array([1]*45 + [0]*(200-45)) >>> ttest_ind(group1, group2) Ttest_indResult(statistic=-0.5627179589855622, pvalue=0.573989277115258) """ if equal_var: df, denom = _equal_var_ttest_denom(std1**2, nobs1, std2**2, nobs2) else: df, denom = _unequal_var_ttest_denom(std1**2, nobs1, std2**2, nobs2) res = _ttest_ind_from_stats(mean1, mean2, denom, df) return Ttest_indResult(*res) def ttest_ind(a, b, axis=0, equal_var=True, nan_policy='propagate'): """ Calculate the T-test for the means of *two independent* samples of scores. This is a two-sided test for the null hypothesis that 2 independent samples have identical average (expected) values. This test assumes that the populations have identical variances by default. Parameters ---------- a, b : array_like The arrays must have the same shape, except in the dimension corresponding to `axis` (the first, by default). axis : int or None, optional Axis along which to compute test. If None, compute over the whole arrays, `a`, and `b`. equal_var : bool, optional If True (default), perform a standard independent 2 sample test that assumes equal population variances [1]_. If False, perform Welch's t-test, which does not assume equal population variance [2]_. .. versionadded:: 0.11.0 nan_policy : {'propagate', 'raise', 'omit'}, optional Defines how to handle when input contains nan. 'propagate' returns nan, 'raise' throws an error, 'omit' performs the calculations ignoring nan values. Default is 'propagate'. Returns ------- statistic : float or array The calculated t-statistic. pvalue : float or array The two-tailed p-value. Notes ----- We can use this test, if we observe two independent samples from the same or different population, e.g. exam scores of boys and girls or of two ethnic groups. 
The test measures whether the average (expected) value differs significantly across samples. If we observe a large p-value, for example larger than 0.05 or 0.1, then we cannot reject the null hypothesis of identical average scores. If the p-value is smaller than the threshold, e.g. 1%, 5% or 10%, then we reject the null hypothesis of equal averages. References ---------- .. [1] https://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test .. [2] https://en.wikipedia.org/wiki/Welch%27s_t-test Examples -------- >>> from scipy import stats >>> np.random.seed(12345678) Test with sample with identical means: >>> rvs1 = stats.norm.rvs(loc=5,scale=10,size=500) >>> rvs2 = stats.norm.rvs(loc=5,scale=10,size=500) >>> stats.ttest_ind(rvs1,rvs2) (0.26833823296239279, 0.78849443369564776) >>> stats.ttest_ind(rvs1,rvs2, equal_var = False) (0.26833823296239279, 0.78849452749500748) `ttest_ind` underestimates p for unequal variances: >>> rvs3 = stats.norm.rvs(loc=5, scale=20, size=500) >>> stats.ttest_ind(rvs1, rvs3) (-0.46580283298287162, 0.64145827413436174) >>> stats.ttest_ind(rvs1, rvs3, equal_var = False) (-0.46580283298287162, 0.64149646246569292) When n1 != n2, the equal variance t-statistic is no longer equal to the unequal variance t-statistic: >>> rvs4 = stats.norm.rvs(loc=5, scale=20, size=100) >>> stats.ttest_ind(rvs1, rvs4) (-0.99882539442782481, 0.3182832709103896) >>> stats.ttest_ind(rvs1, rvs4, equal_var = False) (-0.69712570584654099, 0.48716927725402048) T-test with different means, variance, and n: >>> rvs5 = stats.norm.rvs(loc=8, scale=20, size=100) >>> stats.ttest_ind(rvs1, rvs5) (-1.4679669854490653, 0.14263895620529152) >>> stats.ttest_ind(rvs1, rvs5, equal_var = False) (-0.94365973617132992, 0.34744170334794122) """ a, b, axis = _chk2_asarray(a, b, axis) # check both a and b cna, npa = _contains_nan(a, nan_policy) cnb, npb = _contains_nan(b, nan_policy) contains_nan = cna or cnb if npa == 'omit' or npb == 'omit': nan_policy = 'omit' if contains_nan and nan_policy == 'omit': a = ma.masked_invalid(a) b = ma.masked_invalid(b) return mstats_basic.ttest_ind(a, b, axis, equal_var) if a.size == 0 or b.size == 0: return Ttest_indResult(np.nan, np.nan) v1 = np.var(a, axis, ddof=1) v2 = np.var(b, axis, ddof=1) n1 = a.shape[axis] n2 = b.shape[axis] if equal_var: df, denom = _equal_var_ttest_denom(v1, n1, v2, n2) else: df, denom = _unequal_var_ttest_denom(v1, n1, v2, n2) res = _ttest_ind_from_stats(np.mean(a, axis), np.mean(b, axis), denom, df) return Ttest_indResult(*res) Ttest_relResult = namedtuple('Ttest_relResult', ('statistic', 'pvalue')) def ttest_rel(a, b, axis=0, nan_policy='propagate'): """ Calculate the T-test on TWO RELATED samples of scores, a and b. This is a two-sided test for the null hypothesis that 2 related or repeated samples have identical average (expected) values. Parameters ---------- a, b : array_like The arrays must have the same shape. axis : int or None, optional Axis along which to compute test. If None, compute over the whole arrays, `a`, and `b`. nan_policy : {'propagate', 'raise', 'omit'}, optional Defines how to handle when input contains nan. 'propagate' returns nan, 'raise' throws an error, 'omit' performs the calculations ignoring nan values. Default is 'propagate'. Returns ------- statistic : float or array t-statistic pvalue : float or array two-tailed p-value Notes ----- Examples for the use are scores of the same set of student in different exams, or repeated sampling from the same units. 
The test measures whether the average score differs significantly across samples (e.g. exams). If we observe a large p-value, for example greater than 0.05 or 0.1 then we cannot reject the null hypothesis of identical average scores. If the p-value is smaller than the threshold, e.g. 1%, 5% or 10%, then we reject the null hypothesis of equal averages. Small p-values are associated with large t-statistics. References ---------- https://en.wikipedia.org/wiki/T-test#Dependent_t-test_for_paired_samples Examples -------- >>> from scipy import stats >>> np.random.seed(12345678) # fix random seed to get same numbers >>> rvs1 = stats.norm.rvs(loc=5,scale=10,size=500) >>> rvs2 = (stats.norm.rvs(loc=5,scale=10,size=500) + ... stats.norm.rvs(scale=0.2,size=500)) >>> stats.ttest_rel(rvs1,rvs2) (0.24101764965300962, 0.80964043445811562) >>> rvs3 = (stats.norm.rvs(loc=8,scale=10,size=500) + ... stats.norm.rvs(scale=0.2,size=500)) >>> stats.ttest_rel(rvs1,rvs3) (-3.9995108708727933, 7.3082402191726459e-005) """ a, b, axis = _chk2_asarray(a, b, axis) cna, npa = _contains_nan(a, nan_policy) cnb, npb = _contains_nan(b, nan_policy) contains_nan = cna or cnb if npa == 'omit' or npb == 'omit': nan_policy = 'omit' if contains_nan and nan_policy == 'omit': a = ma.masked_invalid(a) b = ma.masked_invalid(b) m = ma.mask_or(ma.getmask(a), ma.getmask(b)) aa = ma.array(a, mask=m, copy=True) bb = ma.array(b, mask=m, copy=True) return mstats_basic.ttest_rel(aa, bb, axis) if a.shape[axis] != b.shape[axis]: raise ValueError('unequal length arrays') if a.size == 0 or b.size == 0: return np.nan, np.nan n = a.shape[axis] df = n - 1 d = (a - b).astype(np.float64) v = np.var(d, axis, ddof=1) dm = np.mean(d, axis) denom = np.sqrt(v / n) with np.errstate(divide='ignore', invalid='ignore'): t = np.divide(dm, denom) t, prob = _ttest_finish(df, t) return Ttest_relResult(t, prob) KstestResult = namedtuple('KstestResult', ('statistic', 'pvalue')) def kstest(rvs, cdf, args=(), N=20, alternative='two-sided', mode='approx'): """ Perform the Kolmogorov-Smirnov test for goodness of fit. This performs a test of the distribution F(x) of an observed random variable against a given distribution G(x). Under the null hypothesis the two distributions are identical, F(x)=G(x). The alternative hypothesis can be either 'two-sided' (default), 'less' or 'greater'. The KS test is only valid for continuous distributions. Parameters ---------- rvs : str, array or callable If a string, it should be the name of a distribution in `scipy.stats`. If an array, it should be a 1-D array of observations of random variables. If a callable, it should be a function to generate random variables; it is required to have a keyword argument `size`. cdf : str or callable If a string, it should be the name of a distribution in `scipy.stats`. If `rvs` is a string then `cdf` can be False or the same as `rvs`. If a callable, that callable is used to calculate the cdf. args : tuple, sequence, optional Distribution parameters, used if `rvs` or `cdf` are strings. N : int, optional Sample size if `rvs` is string or callable. Default is 20. alternative : {'two-sided', 'less','greater'}, optional Defines the alternative hypothesis (see explanation above). Default is 'two-sided'. mode : 'approx' (default) or 'asymp', optional Defines the distribution used for calculating the p-value. - 'approx' : use approximation to exact distribution of test statistic - 'asymp' : use asymptotic distribution of test statistic Returns ------- statistic : float KS test statistic, either D, D+ or D-. 
pvalue : float One-tailed or two-tailed p-value. Notes ----- In the one-sided test, the alternative is that the empirical cumulative distribution function of the random variable is "less" or "greater" than the cumulative distribution function G(x) of the hypothesis, ``F(x)<=G(x)``, resp. ``F(x)>=G(x)``. Examples -------- >>> from scipy import stats >>> x = np.linspace(-15, 15, 9) >>> stats.kstest(x, 'norm') (0.44435602715924361, 0.038850142705171065) >>> np.random.seed(987654321) # set random seed to get the same result >>> stats.kstest('norm', False, N=100) (0.058352892479417884, 0.88531190944151261) The above lines are equivalent to: >>> np.random.seed(987654321) >>> stats.kstest(stats.norm.rvs(size=100), 'norm') (0.058352892479417884, 0.88531190944151261) *Test against one-sided alternative hypothesis* Shift distribution to larger values, so that ``cdf_dgp(x) < norm.cdf(x)``: >>> np.random.seed(987654321) >>> x = stats.norm.rvs(loc=0.2, size=100) >>> stats.kstest(x,'norm', alternative = 'less') (0.12464329735846891, 0.040989164077641749) Reject equal distribution against alternative hypothesis: less >>> stats.kstest(x,'norm', alternative = 'greater') (0.0072115233216311081, 0.98531158590396395) Don't reject equal distribution against alternative hypothesis: greater >>> stats.kstest(x,'norm', mode='asymp') (0.12464329735846891, 0.08944488871182088) *Testing t distributed random variables against normal distribution* With 100 degrees of freedom the t distribution looks close to the normal distribution, and the K-S test does not reject the hypothesis that the sample came from the normal distribution: >>> np.random.seed(987654321) >>> stats.kstest(stats.t.rvs(100,size=100),'norm') (0.072018929165471257, 0.67630062862479168) With 3 degrees of freedom the t distribution looks sufficiently different from the normal distribution, that we can reject the hypothesis that the sample came from the normal distribution at the 10% level: >>> np.random.seed(987654321) >>> stats.kstest(stats.t.rvs(3,size=100),'norm') (0.131016895759829, 0.058826222555312224) """ if isinstance(rvs, string_types): if (not cdf) or (cdf == rvs): cdf = getattr(distributions, rvs).cdf rvs = getattr(distributions, rvs).rvs else: raise AttributeError("if rvs is string, cdf has to be the " "same distribution") if isinstance(cdf, string_types): cdf = getattr(distributions, cdf).cdf if callable(rvs): kwds = {'size': N} vals = np.sort(rvs(*args, **kwds)) else: vals = np.sort(rvs) N = len(vals) cdfvals = cdf(vals, *args) # to not break compatibility with existing code if alternative == 'two_sided': alternative = 'two-sided' if alternative in ['two-sided', 'greater']: Dplus = (np.arange(1.0, N + 1)/N - cdfvals).max() if alternative == 'greater': return KstestResult(Dplus, distributions.ksone.sf(Dplus, N)) if alternative in ['two-sided', 'less']: Dmin = (cdfvals - np.arange(0.0, N)/N).max() if alternative == 'less': return KstestResult(Dmin, distributions.ksone.sf(Dmin, N)) if alternative == 'two-sided': D = np.max([Dplus, Dmin]) if mode == 'asymp': return KstestResult(D, distributions.kstwobign.sf(D * np.sqrt(N))) if mode == 'approx': pval_two = distributions.kstwobign.sf(D * np.sqrt(N)) if N > 2666 or pval_two > 0.80 - N*0.3/1000: return KstestResult(D, pval_two) else: return KstestResult(D, 2 * distributions.ksone.sf(D, N)) # Map from names to lambda_ values used in power_divergence(). 
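# ('pearson', i.e. lambda_ = 1, reproduces the ordinary chi-squared statistic;
# the remaining entries mirror the table given in the `power_divergence` docstring.)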
_power_div_lambda_names = { "pearson": 1, "log-likelihood": 0, "freeman-tukey": -0.5, "mod-log-likelihood": -1, "neyman": -2, "cressie-read": 2/3, } def _count(a, axis=None): """ Count the number of non-masked elements of an array. This function behaves like np.ma.count(), but is much faster for ndarrays. """ if hasattr(a, 'count'): num = a.count(axis=axis) if isinstance(num, np.ndarray) and num.ndim == 0: # In some cases, the `count` method returns a scalar array (e.g. # np.array(3)), but we want a plain integer. num = int(num) else: if axis is None: num = a.size else: num = a.shape[axis] return num Power_divergenceResult = namedtuple('Power_divergenceResult', ('statistic', 'pvalue')) def power_divergence(f_obs, f_exp=None, ddof=0, axis=0, lambda_=None): """ Cressie-Read power divergence statistic and goodness of fit test. This function tests the null hypothesis that the categorical data has the given frequencies, using the Cressie-Read power divergence statistic. Parameters ---------- f_obs : array_like Observed frequencies in each category. f_exp : array_like, optional Expected frequencies in each category. By default the categories are assumed to be equally likely. ddof : int, optional "Delta degrees of freedom": adjustment to the degrees of freedom for the p-value. The p-value is computed using a chi-squared distribution with ``k - 1 - ddof`` degrees of freedom, where `k` is the number of observed frequencies. The default value of `ddof` is 0. axis : int or None, optional The axis of the broadcast result of `f_obs` and `f_exp` along which to apply the test. If axis is None, all values in `f_obs` are treated as a single data set. Default is 0. lambda_ : float or str, optional `lambda_` gives the power in the Cressie-Read power divergence statistic. The default is 1. For convenience, `lambda_` may be assigned one of the following strings, in which case the corresponding numerical value is used:: String Value Description "pearson" 1 Pearson's chi-squared statistic. In this case, the function is equivalent to `stats.chisquare`. "log-likelihood" 0 Log-likelihood ratio. Also known as the G-test [3]_. "freeman-tukey" -1/2 Freeman-Tukey statistic. "mod-log-likelihood" -1 Modified log-likelihood ratio. "neyman" -2 Neyman's statistic. "cressie-read" 2/3 The power recommended in [5]_. Returns ------- statistic : float or ndarray The Cressie-Read power divergence test statistic. The value is a float if `axis` is None or if` `f_obs` and `f_exp` are 1-D. pvalue : float or ndarray The p-value of the test. The value is a float if `ddof` and the return value `stat` are scalars. See Also -------- chisquare Notes ----- This test is invalid when the observed or expected frequencies in each category are too small. A typical rule is that all of the observed and expected frequencies should be at least 5. When `lambda_` is less than zero, the formula for the statistic involves dividing by `f_obs`, so a warning or error may be generated if any value in `f_obs` is 0. Similarly, a warning or error may be generated if any value in `f_exp` is zero when `lambda_` >= 0. The default degrees of freedom, k-1, are for the case when no parameters of the distribution are estimated. If p parameters are estimated by efficient maximum likelihood then the correct degrees of freedom are k-1-p. If the parameters are estimated in a different way, then the dof can be between k-1-p and k-1. However, it is also possible that the asymptotic distribution is not a chisquare, in which case this test is not appropriate. 
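    For reference, the general member of the Cressie-Read family computed
    here (stated in its standard form, consistent with the implementation
    below) is

    .. math:: T = \frac{2}{\lambda(\lambda+1)}
                  \sum_i f_{obs,i}
                  \left[\left(\frac{f_{obs,i}}{f_{exp,i}}\right)^\lambda - 1\right]

    with the usual limiting forms
    :math:`2\sum_i f_{obs,i}\log(f_{obs,i}/f_{exp,i})` at :math:`\lambda = 0`
    and :math:`2\sum_i f_{exp,i}\log(f_{exp,i}/f_{obs,i})` at
    :math:`\lambda = -1`.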
This function handles masked arrays. If an element of `f_obs` or `f_exp` is masked, then data at that position is ignored, and does not count towards the size of the data set. .. versionadded:: 0.13.0 References ---------- .. [1] Lowry, Richard. "Concepts and Applications of Inferential Statistics". Chapter 8. https://web.archive.org/web/20171015035606/http://faculty.vassar.edu/lowry/ch8pt1.html .. [2] "Chi-squared test", https://en.wikipedia.org/wiki/Chi-squared_test .. [3] "G-test", https://en.wikipedia.org/wiki/G-test .. [4] Sokal, R. R. and Rohlf, F. J. "Biometry: the principles and practice of statistics in biological research", New York: Freeman (1981) .. [5] Cressie, N. and Read, T. R. C., "Multinomial Goodness-of-Fit Tests", J. Royal Stat. Soc. Series B, Vol. 46, No. 3 (1984), pp. 440-464. Examples -------- (See `chisquare` for more examples.) When just `f_obs` is given, it is assumed that the expected frequencies are uniform and given by the mean of the observed frequencies. Here we perform a G-test (i.e. use the log-likelihood ratio statistic): >>> from scipy.stats import power_divergence >>> power_divergence([16, 18, 16, 14, 12, 12], lambda_='log-likelihood') (2.006573162632538, 0.84823476779463769) The expected frequencies can be given with the `f_exp` argument: >>> power_divergence([16, 18, 16, 14, 12, 12], ... f_exp=[16, 16, 16, 16, 16, 8], ... lambda_='log-likelihood') (3.3281031458963746, 0.6495419288047497) When `f_obs` is 2-D, by default the test is applied to each column. >>> obs = np.array([[16, 18, 16, 14, 12, 12], [32, 24, 16, 28, 20, 24]]).T >>> obs.shape (6, 2) >>> power_divergence(obs, lambda_="log-likelihood") (array([ 2.00657316, 6.77634498]), array([ 0.84823477, 0.23781225])) By setting ``axis=None``, the test is applied to all data in the array, which is equivalent to applying the test to the flattened array. >>> power_divergence(obs, axis=None) (23.31034482758621, 0.015975692534127565) >>> power_divergence(obs.ravel()) (23.31034482758621, 0.015975692534127565) `ddof` is the change to make to the default degrees of freedom. >>> power_divergence([16, 18, 16, 14, 12, 12], ddof=1) (2.0, 0.73575888234288467) The calculation of the p-values is done by broadcasting the test statistic with `ddof`. >>> power_divergence([16, 18, 16, 14, 12, 12], ddof=[0,1,2]) (2.0, array([ 0.84914504, 0.73575888, 0.5724067 ])) `f_obs` and `f_exp` are also broadcast. In the following, `f_obs` has shape (6,) and `f_exp` has shape (2, 6), so the result of broadcasting `f_obs` and `f_exp` has shape (2, 6). To compute the desired chi-squared statistics, we must use ``axis=1``: >>> power_divergence([16, 18, 16, 14, 12, 12], ... f_exp=[[16, 16, 16, 16, 16, 8], ... [8, 20, 20, 16, 12, 12]], ... axis=1) (array([ 3.5 , 9.25]), array([ 0.62338763, 0.09949846])) """ # Convert the input argument `lambda_` to a numerical value. if isinstance(lambda_, string_types): if lambda_ not in _power_div_lambda_names: names = repr(list(_power_div_lambda_names.keys()))[1:-1] raise ValueError("invalid string for lambda_: {0!r}. Valid strings " "are {1}".format(lambda_, names)) lambda_ = _power_div_lambda_names[lambda_] elif lambda_ is None: lambda_ = 1 f_obs = np.asanyarray(f_obs) if f_exp is not None: f_exp = np.asanyarray(f_exp) else: # Ignore 'invalid' errors so the edge case of a data set with length 0 # is handled without spurious warnings. 
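        # (With `f_exp` unspecified, the expected counts default to the mean of
        # the observed counts along `axis`, i.e. the uniform null hypothesis
        # described in the docstring.)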
with np.errstate(invalid='ignore'): f_exp = f_obs.mean(axis=axis, keepdims=True) # `terms` is the array of terms that are summed along `axis` to create # the test statistic. We use some specialized code for a few special # cases of lambda_. if lambda_ == 1: # Pearson's chi-squared statistic terms = (f_obs - f_exp)**2 / f_exp elif lambda_ == 0: # Log-likelihood ratio (i.e. G-test) terms = 2.0 * special.xlogy(f_obs, f_obs / f_exp) elif lambda_ == -1: # Modified log-likelihood ratio terms = 2.0 * special.xlogy(f_exp, f_exp / f_obs) else: # General Cressie-Read power divergence. terms = f_obs * ((f_obs / f_exp)**lambda_ - 1) terms /= 0.5 * lambda_ * (lambda_ + 1) stat = terms.sum(axis=axis) num_obs = _count(terms, axis=axis) ddof = asarray(ddof) p = distributions.chi2.sf(stat, num_obs - 1 - ddof) return Power_divergenceResult(stat, p) def chisquare(f_obs, f_exp=None, ddof=0, axis=0): """ Calculate a one-way chi square test. The chi square test tests the null hypothesis that the categorical data has the given frequencies. Parameters ---------- f_obs : array_like Observed frequencies in each category. f_exp : array_like, optional Expected frequencies in each category. By default the categories are assumed to be equally likely. ddof : int, optional "Delta degrees of freedom": adjustment to the degrees of freedom for the p-value. The p-value is computed using a chi-squared distribution with ``k - 1 - ddof`` degrees of freedom, where `k` is the number of observed frequencies. The default value of `ddof` is 0. axis : int or None, optional The axis of the broadcast result of `f_obs` and `f_exp` along which to apply the test. If axis is None, all values in `f_obs` are treated as a single data set. Default is 0. Returns ------- chisq : float or ndarray The chi-squared test statistic. The value is a float if `axis` is None or `f_obs` and `f_exp` are 1-D. p : float or ndarray The p-value of the test. The value is a float if `ddof` and the return value `chisq` are scalars. See Also -------- scipy.stats.power_divergence Notes ----- This test is invalid when the observed or expected frequencies in each category are too small. A typical rule is that all of the observed and expected frequencies should be at least 5. The default degrees of freedom, k-1, are for the case when no parameters of the distribution are estimated. If p parameters are estimated by efficient maximum likelihood then the correct degrees of freedom are k-1-p. If the parameters are estimated in a different way, then the dof can be between k-1-p and k-1. However, it is also possible that the asymptotic distribution is not a chisquare, in which case this test is not appropriate. References ---------- .. [1] Lowry, Richard. "Concepts and Applications of Inferential Statistics". Chapter 8. https://web.archive.org/web/20171022032306/http://vassarstats.net:80/textbook/ch8pt1.html .. [2] "Chi-squared test", https://en.wikipedia.org/wiki/Chi-squared_test Examples -------- When just `f_obs` is given, it is assumed that the expected frequencies are uniform and given by the mean of the observed frequencies. >>> from scipy.stats import chisquare >>> chisquare([16, 18, 16, 14, 12, 12]) (2.0, 0.84914503608460956) With `f_exp` the expected frequencies can be given. >>> chisquare([16, 18, 16, 14, 12, 12], f_exp=[16, 16, 16, 16, 16, 8]) (3.5, 0.62338762774958223) When `f_obs` is 2-D, by default the test is applied to each column. >>> obs = np.array([[16, 18, 16, 14, 12, 12], [32, 24, 16, 28, 20, 24]]).T >>> obs.shape (6, 2) >>> chisquare(obs) (array([ 2. 
, 6.66666667]), array([ 0.84914504, 0.24663415])) By setting ``axis=None``, the test is applied to all data in the array, which is equivalent to applying the test to the flattened array. >>> chisquare(obs, axis=None) (23.31034482758621, 0.015975692534127565) >>> chisquare(obs.ravel()) (23.31034482758621, 0.015975692534127565) `ddof` is the change to make to the default degrees of freedom. >>> chisquare([16, 18, 16, 14, 12, 12], ddof=1) (2.0, 0.73575888234288467) The calculation of the p-values is done by broadcasting the chi-squared statistic with `ddof`. >>> chisquare([16, 18, 16, 14, 12, 12], ddof=[0,1,2]) (2.0, array([ 0.84914504, 0.73575888, 0.5724067 ])) `f_obs` and `f_exp` are also broadcast. In the following, `f_obs` has shape (6,) and `f_exp` has shape (2, 6), so the result of broadcasting `f_obs` and `f_exp` has shape (2, 6). To compute the desired chi-squared statistics, we use ``axis=1``: >>> chisquare([16, 18, 16, 14, 12, 12], ... f_exp=[[16, 16, 16, 16, 16, 8], [8, 20, 20, 16, 12, 12]], ... axis=1) (array([ 3.5 , 9.25]), array([ 0.62338763, 0.09949846])) """ return power_divergence(f_obs, f_exp=f_exp, ddof=ddof, axis=axis, lambda_="pearson") Ks_2sampResult = namedtuple('Ks_2sampResult', ('statistic', 'pvalue')) def _compute_prob_inside_method(m, n, g, h): """Count the proportion of paths that stay strictly inside two diagonal lines. Parameters ---------- m : integer m > 0 n : integer n > 0 g : integer g is greatest common divisor of m and n h : integer 0 <= h <= lcm(m,n) Returns ------- p : float The proportion of paths that stay inside the two lines. Count the integer lattice paths from (0, 0) to (m, n) which satisfy |x/m - y/n| < h / lcm(m, n). The paths make steps of size +1 in either positive x or positive y directions. We generally follow Hodges' treatment of Drion/Gnedenko/Korolyuk. Hodges, J.L. Jr., "The Significance Probability of the Smirnov Two-Sample Test," Arkiv fiur Matematik, 3, No. 43 (1958), 469-86. """ # Probability is symmetrical in m, n. Computation below uses m >= n. if m < n: m, n = n, m mg = m // g ng = n // g # Count the integer lattice paths from (0, 0) to (m, n) which satisfy # |nx/g - my/g| < h. # Compute matrix A such that: # A(x, 0) = A(0, y) = 1 # A(x, y) = A(x, y-1) + A(x-1, y), for x,y>=1, except that # A(x, y) = 0 if |x/m - y/n|>= h # Probability is A(m, n)/binom(m+n, n) # Optimizations exist for m==n, m==n*p. # Only need to preserve a single column of A, and only a sliding window of it. # minj keeps track of the slide. minj, maxj = 0, min(int(np.ceil(h / mg)), n + 1) curlen = maxj - minj # Make a vector long enough to hold maximum window needed. lenA = min(2 * maxj + 2, n + 1) # This is an integer calculation, but the entries are essentially # binomial coefficients, hence grow quickly. # Scaling after each column is computed avoids dividing by a # large binomial coefficent at the end. Instead it is incorporated # one factor at a time during the computation. dtype = np.float64 A = np.zeros(lenA, dtype=dtype) # Initialize the first column A[minj:maxj] = 1 for i in range(1, m + 1): # Generate the next column. 
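        # (The window update below realizes the recurrence
        # A(x, y) = A(x, y-1) + A(x-1, y): a cumulative sum of the previous
        # column over the overlapping index range produces the whole new
        # column in one pass.)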
# First calculate the sliding window lastminj, lastmaxj, lastlen = minj, maxj, curlen minj = max(int(np.floor((ng * i - h) / mg)) + 1, 0) minj = min(minj, n) maxj = min(int(np.ceil((ng * i + h) / mg)), n + 1) if maxj <= minj: return 0 # Now fill in the values A[0:maxj - minj] = np.cumsum(A[minj - lastminj:maxj - lastminj]) curlen = maxj - minj if lastlen > curlen: # Set some carried-over elements to 0 A[maxj - minj:maxj - minj + (lastlen - curlen)] = 0 # Peel off one term from each of top and bottom of the binomial coefficient. scaling_factor = i * 1.0 / (n + i) A *= scaling_factor return A[maxj - minj - 1] def _compute_prob_outside_square(n, h): """Compute the proportion of paths that pass outside the two diagonal lines. Parameters ---------- n : integer n > 0 h : integer 0 <= h <= n Returns ------- p : float The proportion of paths that pass outside the lines x-y = +/-h. """ # Compute Pr(D_{n,n} >= h/n) # Prob = 2 * ( binom(2n, n-h) - binom(2n, n-2a) + binom(2n, n-3a) - ... ) / binom(2n, n) # This formulation exhibits subtractive cancellation. # Instead divide each term by binom(2n, n), then factor common terms # and use a Horner-like algorithm # P = 2 * A0 * (1 - A1*(1 - A2*(1 - A3*(1 - A4*(...))))) P = 0.0 k = int(np.floor(n / h)) while k >= 0: p1 = 1.0 # Each of the Ai terms has numerator and denominator with h simple terms. for j in range(h): p1 = (n - k * h - j) * p1 / (n + k * h + j + 1) P = p1 * (1.0 - P) k -= 1 return 2 * P def _count_paths_outside_method(m, n, g, h): """Count the number of paths that pass outside the specified diagonal. Parameters ---------- m : integer m > 0 n : integer n > 0 g : integer g is greatest common divisor of m and n h : integer 0 <= h <= lcm(m,n) Returns ------- p : float The number of paths that go low. The calculation may overflow - check for a finite answer. Exceptions ---------- FloatingPointError: Raised if the intermediate computation goes outside the range of a float. Notes ----- Count the integer lattice paths from (0, 0) to (m, n), which at some point (x, y) along the path, satisfy: m*y <= n*x - h*g The paths make steps of size +1 in either positive x or positive y directions. We generally follow Hodges' treatment of Drion/Gnedenko/Korolyuk. Hodges, J.L. Jr., "The Significance Probability of the Smirnov Two-Sample Test," Arkiv fiur Matematik, 3, No. 43 (1958), 469-86. """ # Compute #paths which stay lower than x/m-y/n = h/lcm(m,n) # B(x, y) = #{paths from (0,0) to (x,y) without previously crossing the boundary} # = binom(x, y) - #{paths which already reached the boundary} # Multiply by the number of path extensions going from (x, y) to (m, n) # Sum. # Probability is symmetrical in m, n. Computation below assumes m >= n. if m < n: m, n = n, m mg = m // g ng = n // g # 0 <= x_j <= m is the smallest integer for which n*x_j - m*j < g*h xj = [int(np.ceil((h + mg * j)/ng)) for j in range(n+1)] xj = [_ for _ in xj if _ <= m] lxj = len(xj) # B is an array just holding a few values of B(x,y), the ones needed. # B[j] == B(x_j, j) if lxj == 0: return np.round(special.binom(m + n, n)) B = np.zeros(lxj) B[0] = 1 # Compute the B(x, y) terms # The binomial coefficient is an integer, but special.binom() may return a float. # Round it to the nearest integer. 
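    # (Each B[j] is obtained by a first-passage decomposition: start from
    # binom(x_j + j, j), the unrestricted number of lattice paths ending at
    # (x_j, j), and subtract, for every i < j, the B[i] paths that already met
    # the boundary at (x_i, i) multiplied by the number of extensions from
    # (x_i, i) to (x_j, j).)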
for j in range(1, lxj): Bj = np.round(special.binom(xj[j] + j, j)) if not np.isfinite(Bj): raise FloatingPointError() for i in range(j): bin = np.round(special.binom(xj[j] - xj[i] + j - i, j-i)) dec = bin * B[i] Bj -= dec B[j] = Bj if not np.isfinite(Bj): raise FloatingPointError() # Compute the number of path extensions... num_paths = 0 for j in range(lxj): bin = np.round(special.binom((m-xj[j]) + (n - j), n-j)) term = B[j] * bin if not np.isfinite(term): raise FloatingPointError() num_paths += term return np.round(num_paths) def ks_2samp(data1, data2, alternative='two-sided', mode='auto'): """ Compute the Kolmogorov-Smirnov statistic on 2 samples. This is a two-sided test for the null hypothesis that 2 independent samples are drawn from the same continuous distribution. The alternative hypothesis can be either 'two-sided' (default), 'less' or 'greater'. Parameters ---------- data1, data2 : sequence of 1-D ndarrays Two arrays of sample observations assumed to be drawn from a continuous distribution, sample sizes can be different. alternative : {'two-sided', 'less', 'greater'}, optional Defines the alternative hypothesis (see explanation above). Default is 'two-sided'. mode : {'auto', 'exact', 'asymp'}, optional Defines the method used for calculating the p-value. Default is 'auto'. - 'exact' : use approximation to exact distribution of test statistic - 'asymp' : use asymptotic distribution of test statistic - 'auto' : use 'exact' for small size arrays, 'asymp' for large. Returns ------- statistic : float KS statistic pvalue : float two-tailed p-value Notes ----- This tests whether 2 samples are drawn from the same distribution. Note that, like in the case of the one-sample K-S test, the distribution is assumed to be continuous. In the one-sided test, the alternative is that the empirical cumulative distribution function F(x) of the data1 variable is "less" or "greater" than the empirical cumulative distribution function G(x) of the data2 variable, ``F(x)<=G(x)``, resp. ``F(x)>=G(x)``. If the K-S statistic is small or the p-value is high, then we cannot reject the hypothesis that the distributions of the two samples are the same. If the mode is 'auto', the computation is exact if the sample sizes are less than 10000. For larger sizes, the computation uses the Kolmogorov-Smirnov distributions to compute an approximate value. We generally follow Hodges' treatment of Drion/Gnedenko/Korolyuk [1]_. References ---------- .. [1] Hodges, J.L. Jr., "The Significance Probability of the Smirnov Two-Sample Test," Arkiv fiur Matematik, 3, No. 43 (1958), 469-86. 
Examples -------- >>> from scipy import stats >>> np.random.seed(12345678) #fix random seed to get the same result >>> n1 = 200 # size of first sample >>> n2 = 300 # size of second sample For a different distribution, we can reject the null hypothesis since the pvalue is below 1%: >>> rvs1 = stats.norm.rvs(size=n1, loc=0., scale=1) >>> rvs2 = stats.norm.rvs(size=n2, loc=0.5, scale=1.5) >>> stats.ks_2samp(rvs1, rvs2) (0.20833333333333334, 5.129279597781977e-05) For a slightly different distribution, we cannot reject the null hypothesis at a 10% or lower alpha since the p-value at 0.144 is higher than 10% >>> rvs3 = stats.norm.rvs(size=n2, loc=0.01, scale=1.0) >>> stats.ks_2samp(rvs1, rvs3) (0.10333333333333333, 0.14691437867433876) For an identical distribution, we cannot reject the null hypothesis since the p-value is high, 41%: >>> rvs4 = stats.norm.rvs(size=n2, loc=0.0, scale=1.0) >>> stats.ks_2samp(rvs1, rvs4) (0.07999999999999996, 0.41126949729859719) """ LARGE_N = 10000 # 'auto' will attempt to be exact if n1,n2 <= LARGE_N data1 = np.sort(data1) data2 = np.sort(data2) n1 = data1.shape[0] n2 = data2.shape[0] if min(n1, n2) == 0: raise ValueError('Data passed to ks_2samp must not be empty') data_all = np.concatenate([data1, data2]) # using searchsorted solves equal data problem cdf1 = np.searchsorted(data1, data_all, side='right') / n1 cdf2 = np.searchsorted(data2, data_all, side='right') / n2 cddiffs = cdf1 - cdf2 minS = -np.min(cddiffs) maxS = np.max(cddiffs) alt2Dvalue = {'less': minS, 'greater': maxS, 'two-sided': max(minS, maxS)} d = alt2Dvalue[alternative] g = gcd(n1, n2) n1g = n1 // g n2g = n2 // g prob = -np.inf original_mode = mode if mode == 'auto': if max(n1, n2) <= LARGE_N: mode = 'exact' else: mode = 'asymp' elif mode == 'exact': # If lcm(n1, n2) is too big, switch from exact to asymp if n1g >= np.iinfo(np.int).max / n2g: mode = 'asymp' warnings.warn( "Exact ks_2samp calculation not possible with samples sizes " "%d and %d. Switching to 'asymp' " % (n1, n2), RuntimeWarning) saw_fp_error = False if mode == 'exact': lcm = (n1 // g) * n2 h = int(np.round(d * lcm)) d = h * 1.0 / lcm if h == 0: prob = 1.0 else: try: if alternative == 'two-sided': if n1 == n2: prob = _compute_prob_outside_square(n1, h) else: prob = 1 - _compute_prob_inside_method(n1, n2, g, h) else: if n1 == n2: # prob = binom(2n, n-h) / binom(2n, n) # Evaluating in that form incurs roundoff errors # from special.binom. Instead calculate directly prob = 1.0 for j in range(h): prob = (n1 - j) * prob / (n1 + j + 1) else: num_paths = _count_paths_outside_method(n1, n2, g, h) bin = special.binom(n1 + n2, n1) if not np.isfinite(bin) or not np.isfinite(num_paths) or num_paths > bin: raise FloatingPointError() prob = num_paths / bin except FloatingPointError: # Switch mode mode = 'asymp' saw_fp_error = True # Can't raise warning here, inside the try finally: if saw_fp_error: if original_mode == 'exact': warnings.warn( "ks_2samp: Exact calculation overflowed. " "Switching to mode=%s" % mode, RuntimeWarning) else: if prob > 1 or prob < 0: mode = 'asymp' if original_mode == 'exact': warnings.warn( "ks_2samp: Exact calculation incurred large" " rounding error. Switching to mode=%s" % mode, RuntimeWarning) if mode == 'asymp': # The product n1*n2 is large. Use Smirnov's asymptoptic formula. if alternative == 'two-sided': en = np.sqrt(n1 * n2 / (n1 + n2)) # Switch to using kstwo.sf() when it becomes available. 
# prob = distributions.kstwo.sf(d, int(np.round(en))) prob = distributions.kstwobign.sf(en * d) else: m, n = max(n1, n2), min(n1, n2) z = np.sqrt(m*n/(m+n)) * d # Use Hodges' suggested approximation Eqn 5.3 expt = -2 * z**2 - 2 * z * (m + 2*n)/np.sqrt(m*n*(m+n))/3.0 prob = np.exp(expt) prob = (0 if prob < 0 else (1 if prob > 1 else prob)) return Ks_2sampResult(d, prob) def tiecorrect(rankvals): """ Tie correction factor for ties in the Mann-Whitney U and Kruskal-Wallis H tests. Parameters ---------- rankvals : array_like A 1-D sequence of ranks. Typically this will be the array returned by `~scipy.stats.rankdata`. Returns ------- factor : float Correction factor for U or H. See Also -------- rankdata : Assign ranks to the data mannwhitneyu : Mann-Whitney rank test kruskal : Kruskal-Wallis H test References ---------- .. [1] Siegel, S. (1956) Nonparametric Statistics for the Behavioral Sciences. New York: McGraw-Hill. Examples -------- >>> from scipy.stats import tiecorrect, rankdata >>> tiecorrect([1, 2.5, 2.5, 4]) 0.9 >>> ranks = rankdata([1, 3, 2, 4, 5, 7, 2, 8, 4]) >>> ranks array([ 1. , 4. , 2.5, 5.5, 7. , 8. , 2.5, 9. , 5.5]) >>> tiecorrect(ranks) 0.9833333333333333 """ arr = np.sort(rankvals) idx = np.nonzero(np.r_[True, arr[1:] != arr[:-1], True])[0] cnt = np.diff(idx).astype(np.float64) size = np.float64(arr.size) return 1.0 if size < 2 else 1.0 - (cnt**3 - cnt).sum() / (size**3 - size) MannwhitneyuResult = namedtuple('MannwhitneyuResult', ('statistic', 'pvalue')) def mannwhitneyu(x, y, use_continuity=True, alternative=None): """ Compute the Mann-Whitney rank test on samples x and y. Parameters ---------- x, y : array_like Array of samples, should be one-dimensional. use_continuity : bool, optional Whether a continuity correction (1/2.) should be taken into account. Default is True. alternative : None (deprecated), 'less', 'two-sided', or 'greater' Whether to get the p-value for the one-sided hypothesis ('less' or 'greater') or for the two-sided hypothesis ('two-sided'). Defaults to None, which results in a p-value half the size of the 'two-sided' p-value and a different U statistic. The default behavior is not the same as using 'less' or 'greater': it only exists for backward compatibility and is deprecated. Returns ------- statistic : float The Mann-Whitney U statistic, equal to min(U for x, U for y) if `alternative` is equal to None (deprecated; exists for backward compatibility), and U for y otherwise. pvalue : float p-value assuming an asymptotic normal distribution. One-sided or two-sided, depending on the choice of `alternative`. Notes ----- Use only when the number of observation in each sample is > 20 and you have 2 independent samples of ranks. Mann-Whitney U is significant if the u-obtained is LESS THAN or equal to the critical value of U. This test corrects for ties and by default uses a continuity correction. References ---------- .. [1] https://en.wikipedia.org/wiki/Mann-Whitney_U_test .. [2] H.B. Mann and D.R. Whitney, "On a Test of Whether one of Two Random Variables is Stochastically Larger than the Other," The Annals of Mathematical Statistics, vol. 18, no. 1, pp. 50-60, 1947. 
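    Examples
    --------
    A minimal illustration of the call (an illustrative sketch; the numerical
    output is omitted here because it depends on the normal approximation
    described above):

    >>> from scipy import stats
    >>> x = [1, 4, 5, 6, 7]
    >>> y = [2, 3, 8, 9, 10]
    >>> U, p = stats.mannwhitneyu(x, y, alternative='two-sided')

    Passing ``alternative='less'`` or ``alternative='greater'`` instead yields
    the corresponding one-sided p-value.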
""" if alternative is None: warnings.warn("Calling `mannwhitneyu` without specifying " "`alternative` is deprecated.", DeprecationWarning) x = np.asarray(x) y = np.asarray(y) n1 = len(x) n2 = len(y) ranked = rankdata(np.concatenate((x, y))) rankx = ranked[0:n1] # get the x-ranks u1 = n1*n2 + (n1*(n1+1))/2.0 - np.sum(rankx, axis=0) # calc U for x u2 = n1*n2 - u1 # remainder is U for y T = tiecorrect(ranked) if T == 0: raise ValueError('All numbers are identical in mannwhitneyu') sd = np.sqrt(T * n1 * n2 * (n1+n2+1) / 12.0) meanrank = n1*n2/2.0 + 0.5 * use_continuity if alternative is None or alternative == 'two-sided': bigu = max(u1, u2) elif alternative == 'less': bigu = u1 elif alternative == 'greater': bigu = u2 else: raise ValueError("alternative should be None, 'less', 'greater' " "or 'two-sided'") z = (bigu - meanrank) / sd if alternative is None: # This behavior, equal to half the size of the two-sided # p-value, is deprecated. p = distributions.norm.sf(abs(z)) elif alternative == 'two-sided': p = 2 * distributions.norm.sf(abs(z)) else: p = distributions.norm.sf(z) u = u2 # This behavior is deprecated. if alternative is None: u = min(u1, u2) return MannwhitneyuResult(u, p) RanksumsResult = namedtuple('RanksumsResult', ('statistic', 'pvalue')) def ranksums(x, y): """ Compute the Wilcoxon rank-sum statistic for two samples. The Wilcoxon rank-sum test tests the null hypothesis that two sets of measurements are drawn from the same distribution. The alternative hypothesis is that values in one sample are more likely to be larger than the values in the other sample. This test should be used to compare two samples from continuous distributions. It does not handle ties between measurements in x and y. For tie-handling and an optional continuity correction see `scipy.stats.mannwhitneyu`. Parameters ---------- x,y : array_like The data from the two samples Returns ------- statistic : float The test statistic under the large-sample approximation that the rank sum statistic is normally distributed pvalue : float The two-sided p-value of the test References ---------- .. [1] https://en.wikipedia.org/wiki/Wilcoxon_rank-sum_test """ x, y = map(np.asarray, (x, y)) n1 = len(x) n2 = len(y) alldata = np.concatenate((x, y)) ranked = rankdata(alldata) x = ranked[:n1] s = np.sum(x, axis=0) expected = n1 * (n1+n2+1) / 2.0 z = (s - expected) / np.sqrt(n1*n2*(n1+n2+1)/12.0) prob = 2 * distributions.norm.sf(abs(z)) return RanksumsResult(z, prob) KruskalResult = namedtuple('KruskalResult', ('statistic', 'pvalue')) def kruskal(*args, **kwargs): """ Compute the Kruskal-Wallis H-test for independent samples The Kruskal-Wallis H-test tests the null hypothesis that the population median of all of the groups are equal. It is a non-parametric version of ANOVA. The test works on 2 or more independent samples, which may have different sizes. Note that rejecting the null hypothesis does not indicate which of the groups differs. Post-hoc comparisons between groups are required to determine which groups are different. Parameters ---------- sample1, sample2, ... : array_like Two or more arrays with the sample measurements can be given as arguments. nan_policy : {'propagate', 'raise', 'omit'}, optional Defines how to handle when input contains nan. 'propagate' returns nan, 'raise' throws an error, 'omit' performs the calculations ignoring nan values. Default is 'propagate'. 
Returns ------- statistic : float The Kruskal-Wallis H statistic, corrected for ties pvalue : float The p-value for the test using the assumption that H has a chi square distribution See Also -------- f_oneway : 1-way ANOVA mannwhitneyu : Mann-Whitney rank test on two samples. friedmanchisquare : Friedman test for repeated measurements Notes ----- Due to the assumption that H has a chi square distribution, the number of samples in each group must not be too small. A typical rule is that each sample must have at least 5 measurements. References ---------- .. [1] W. H. Kruskal & W. W. Wallis, "Use of Ranks in One-Criterion Variance Analysis", Journal of the American Statistical Association, Vol. 47, Issue 260, pp. 583-621, 1952. .. [2] https://en.wikipedia.org/wiki/Kruskal-Wallis_one-way_analysis_of_variance Examples -------- >>> from scipy import stats >>> x = [1, 3, 5, 7, 9] >>> y = [2, 4, 6, 8, 10] >>> stats.kruskal(x, y) KruskalResult(statistic=0.2727272727272734, pvalue=0.6015081344405895) >>> x = [1, 1, 1] >>> y = [2, 2, 2] >>> z = [2, 2] >>> stats.kruskal(x, y, z) KruskalResult(statistic=7.0, pvalue=0.0301973834223185) """ args = list(map(np.asarray, args)) num_groups = len(args) if num_groups < 2: raise ValueError("Need at least two groups in stats.kruskal()") for arg in args: if arg.size == 0: return KruskalResult(np.nan, np.nan) n = np.asarray(list(map(len, args))) if 'nan_policy' in kwargs.keys(): if kwargs['nan_policy'] not in ('propagate', 'raise', 'omit'): raise ValueError("nan_policy must be 'propagate', " "'raise' or'omit'") else: nan_policy = kwargs['nan_policy'] else: nan_policy = 'propagate' contains_nan = False for arg in args: cn = _contains_nan(arg, nan_policy) if cn[0]: contains_nan = True break if contains_nan and nan_policy == 'omit': for a in args: a = ma.masked_invalid(a) return mstats_basic.kruskal(*args) if contains_nan and nan_policy == 'propagate': return KruskalResult(np.nan, np.nan) alldata = np.concatenate(args) ranked = rankdata(alldata) ties = tiecorrect(ranked) if ties == 0: raise ValueError('All numbers are identical in kruskal') # Compute sum^2/n for each group and sum j = np.insert(np.cumsum(n), 0, 0) ssbn = 0 for i in range(num_groups): ssbn += _square_of_sums(ranked[j[i]:j[i+1]]) / n[i] totaln = np.sum(n, dtype=float) h = 12.0 / (totaln * (totaln + 1)) * ssbn - 3 * (totaln + 1) df = num_groups - 1 h /= ties return KruskalResult(h, distributions.chi2.sf(h, df)) FriedmanchisquareResult = namedtuple('FriedmanchisquareResult', ('statistic', 'pvalue')) def friedmanchisquare(*args): """ Compute the Friedman test for repeated measurements The Friedman test tests the null hypothesis that repeated measurements of the same individuals have the same distribution. It is often used to test for consistency among measurements obtained in different ways. For example, if two measurement techniques are used on the same set of individuals, the Friedman test can be used to determine if the two measurement techniques are consistent. Parameters ---------- measurements1, measurements2, measurements3... : array_like Arrays of measurements. All of the arrays must have the same number of elements. At least 3 sets of measurements must be given. 
Returns ------- statistic : float the test statistic, correcting for ties pvalue : float the associated p-value assuming that the test statistic has a chi squared distribution Notes ----- Due to the assumption that the test statistic has a chi squared distribution, the p-value is only reliable for n > 10 and more than 6 repeated measurements. References ---------- .. [1] https://en.wikipedia.org/wiki/Friedman_test """ k = len(args) if k < 3: raise ValueError('Less than 3 levels. Friedman test not appropriate.') n = len(args[0]) for i in range(1, k): if len(args[i]) != n: raise ValueError('Unequal N in friedmanchisquare. Aborting.') # Rank data data = np.vstack(args).T data = data.astype(float) for i in range(len(data)): data[i] = rankdata(data[i]) # Handle ties ties = 0 for i in range(len(data)): replist, repnum = find_repeats(array(data[i])) for t in repnum: ties += t * (t*t - 1) c = 1 - ties / (k*(k*k - 1)*n) ssbn = np.sum(data.sum(axis=0)**2) chisq = (12.0 / (k*n*(k+1)) * ssbn - 3*n*(k+1)) / c return FriedmanchisquareResult(chisq, distributions.chi2.sf(chisq, k - 1)) BrunnerMunzelResult = namedtuple('BrunnerMunzelResult', ('statistic', 'pvalue')) def brunnermunzel(x, y, alternative="two-sided", distribution="t", nan_policy='propagate'): """ Computes the Brunner-Munzel test on samples x and y The Brunner-Munzel test is a nonparametric test of the null hypothesis that when values are taken one by one from each group, the probabilities of getting large values in both groups are equal. Unlike the Wilcoxon-Mann-Whitney's U test, this does not require the assumption of equivariance of two groups. Note that this does not assume the distributions are same. This test works on two independent samples, which may have different sizes. Parameters ---------- x, y : array_like Array of samples, should be one-dimensional. alternative : 'less', 'two-sided', or 'greater', optional Whether to get the p-value for the one-sided hypothesis ('less' or 'greater') or for the two-sided hypothesis ('two-sided'). Defaults value is 'two-sided' . distribution: 't' or 'normal', optional Whether to get the p-value by t-distribution or by standard normal distribution. Defaults value is 't' . nan_policy : {'propagate', 'raise', 'omit'}, optional Defines how to handle when input contains nan. 'propagate' returns nan, 'raise' throws an error, 'omit' performs the calculations ignoring nan values. Default is 'propagate'. Returns ------- statistic : float The Brunner-Munzer W statistic. pvalue : float p-value assuming an t distribution. One-sided or two-sided, depending on the choice of `alternative` and `distribution`. See Also -------- mannwhitneyu : Mann-Whitney rank test on two samples. Notes ------- Brunner and Munzel recommended to estimate the p-value by t-distribution when the size of data is 50 or less. If the size is lower than 10, it would be better to use permuted Brunner Munzel test (see [2]_). References ---------- .. [1] Brunner, E. and Munzel, U. "The nonparametric Benhrens-Fisher problem: Asymptotic theory and a small-sample approximation". Biometrical Journal. Vol. 42(2000): 17-25. .. [2] Neubert, K. and Brunner, E. "A studentized permutation test for the non-parametric Behrens-Fisher problem". Computational Statistics and Data Analysis. Vol. 51(2007): 5192-5204. 
Examples -------- >>> from scipy import stats >>> x1 = [1,2,1,1,1,1,1,1,1,1,2,4,1,1] >>> x2 = [3,3,4,3,1,2,3,1,1,5,4] >>> w, p_value = stats.brunnermunzel(x1, x2) >>> w 3.1374674823029505 >>> p_value 0.0057862086661515377 """ x = np.asarray(x) y = np.asarray(y) # check both x and y cnx, npx = _contains_nan(x, nan_policy) cny, npy = _contains_nan(y, nan_policy) contains_nan = cnx or cny if npx == "omit" or npy == "omit": nan_policy = "omit" if contains_nan and nan_policy == "propagate": return BrunnerMunzelResult(np.nan, np.nan) elif contains_nan and nan_policy == "omit": x = ma.masked_invalid(x) y = ma.masked_invalid(y) return mstats_basic.brunnermunzel(x, y, alternative, distribution) nx = len(x) ny = len(y) if nx == 0 or ny == 0: return BrunnerMunzelResult(np.nan, np.nan) rankc = rankdata(np.concatenate((x, y))) rankcx = rankc[0:nx] rankcy = rankc[nx:nx+ny] rankcx_mean = np.mean(rankcx) rankcy_mean = np.mean(rankcy) rankx = rankdata(x) ranky = rankdata(y) rankx_mean = np.mean(rankx) ranky_mean = np.mean(ranky) Sx = np.sum(np.power(rankcx - rankx - rankcx_mean + rankx_mean, 2.0)) Sx /= nx - 1 Sy = np.sum(np.power(rankcy - ranky - rankcy_mean + ranky_mean, 2.0)) Sy /= ny - 1 wbfn = nx * ny * (rankcy_mean - rankcx_mean) wbfn /= (nx + ny) * np.sqrt(nx * Sx + ny * Sy) if distribution == "t": df_numer = np.power(nx * Sx + ny * Sy, 2.0) df_denom = np.power(nx * Sx, 2.0) / (nx - 1) df_denom += np.power(ny * Sy, 2.0) / (ny - 1) df = df_numer / df_denom p = distributions.t.cdf(wbfn, df) elif distribution == "normal": p = distributions.norm.cdf(wbfn) else: raise ValueError( "distribution should be 't' or 'normal'") if alternative == "greater": pass elif alternative == "less": p = 1 - p elif alternative == "two-sided": p = 2 * np.min([p, 1-p]) else: raise ValueError( "alternative should be 'less', 'greater' or 'two-sided'") return BrunnerMunzelResult(wbfn, p) def combine_pvalues(pvalues, method='fisher', weights=None): """ Methods for combining the p-values of independent tests bearing upon the same hypothesis. Parameters ---------- pvalues : array_like, 1-D Array of p-values assumed to come from independent tests. method : {'fisher', 'pearson', 'tippett', 'stouffer', 'mudholkar_george'}, optional. Name of method to use to combine p-values. The following methods are available: - "fisher": Fisher's method (Fisher's combined probability test), the default, the sum of the logarithm of the p-values. - "pearson": Pearson's method (similar to Fisher's but uses sum of the complement of the p-values inside the logarithms). - "tippett": Tippett's method (minimum of p-values). - "stouffer": Stouffer's Z-score method. - "mudholkar_george": the difference of Fisher's and Pearson's methods divided by 2. weights : array_like, 1-D, optional Optional array of weights used only for Stouffer's Z-score method. Returns ------- statistic: float The statistic calculated by the specified method. pval: float The combined p-value. Notes ----- Fisher's method (also known as Fisher's combined probability test) [1]_ uses a chi-squared statistic to compute a combined p-value. The closely related Stouffer's Z-score method [2]_ uses Z-scores rather than p-values. The advantage of Stouffer's method is that it is straightforward to introduce weights, which can make Stouffer's method more powerful than Fisher's method when the p-values are from studies of different size [6]_ [7]_. The Pearson's method uses :math:`log(1-p_i)` inside the sum whereas Fisher's method uses :math:`log(p_i)` [4]_. 
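    Written out, Fisher's combined statistic is
    :math:`-2 \sum_{i=1}^{k} \ln p_i`, which under the null hypothesis of
    :math:`k` independent tests is referred to a chi-squared distribution with
    :math:`2k` degrees of freedom (this is the reference distribution used
    here).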
For Fisher's and Pearson's method, the sum of the logarithms is multiplied by -2 in the implementation. This quantity has a chisquare distribution that determines the p-value. The `mudholkar_george` method is the difference of the Fisher's and Pearson's test statistics, each of which include the -2 factor [4]_. However, the `mudholkar_george` method does not include these -2 factors. The test statistic of `mudholkar_george` is the sum of logisitic random variables and equation 3.6 in [3]_ is used to approximate the p-value based on Student's t-distribution. Fisher's method may be extended to combine p-values from dependent tests [5]_. Extensions such as Brown's method and Kost's method are not currently implemented. .. versionadded:: 0.15.0 References ---------- .. [1] https://en.wikipedia.org/wiki/Fisher%27s_method .. [2] https://en.wikipedia.org/wiki/Fisher%27s_method#Relation_to_Stouffer.27s_Z-score_method .. [3] George, E. O., and G. S. Mudholkar. "On the convolution of logistic random variables." Metrika 30.1 (1983): 1-13. .. [4] Heard, N. and Rubin-Delanchey, P. "Choosing between methods of combining p-values." Biometrika 105.1 (2018): 239-246. .. [5] Whitlock, M. C. "Combining probability from independent tests: the weighted Z-method is superior to Fisher's approach." Journal of Evolutionary Biology 18, no. 5 (2005): 1368-1373. .. [6] Zaykin, Dmitri V. "Optimally weighted Z-test is a powerful method for combining probabilities in meta-analysis." Journal of Evolutionary Biology 24, no. 8 (2011): 1836-1841. .. [7] https://en.wikipedia.org/wiki/Extensions_of_Fisher%27s_method """ pvalues = np.asarray(pvalues) if pvalues.ndim != 1: raise ValueError("pvalues is not 1-D") if method == 'fisher': statistic = -2 * np.sum(np.log(pvalues)) pval = distributions.chi2.sf(statistic, 2 * len(pvalues)) elif method == 'pearson': statistic = -2 * np.sum(np.log1p(-pvalues)) pval = distributions.chi2.sf(statistic, 2 * len(pvalues)) elif method == 'mudholkar_george': statistic = -np.sum(np.log(pvalues)) + np.sum(np.log1p(-pvalues)) nu = 5 * len(pvalues) + 4 approx_factor = np.sqrt(nu / (nu - 2)) pval = distributions.t.sf(statistic * approx_factor, nu) elif method == 'tippett': statistic = np.min(pvalues) pval = distributions.beta.sf(statistic, 1, len(pvalues)) elif method == 'stouffer': if weights is None: weights = np.ones_like(pvalues) elif len(weights) != len(pvalues): raise ValueError("pvalues and weights must be of the same size.") weights = np.asarray(weights) if weights.ndim != 1: raise ValueError("weights is not 1-D") Zi = distributions.norm.isf(pvalues) statistic = np.dot(weights, Zi) / np.linalg.norm(weights) pval = distributions.norm.sf(statistic) else: raise ValueError( "Invalid method '%s'. Options are 'fisher', 'pearson', \ 'mudholkar_george', 'tippett', 'or 'stouffer'", method) return (statistic, pval) ##################################### # STATISTICAL DISTANCES # ##################################### def wasserstein_distance(u_values, v_values, u_weights=None, v_weights=None): r""" Compute the first Wasserstein distance between two 1D distributions. This distance is also known as the earth mover's distance, since it can be seen as the minimum amount of "work" required to transform :math:`u` into :math:`v`, where "work" is measured as the amount of distribution weight that must be moved, multiplied by the distance it has to be moved. .. versionadded:: 1.0.0 Parameters ---------- u_values, v_values : array_like Values observed in the (empirical) distribution. 
u_weights, v_weights : array_like, optional Weight for each value. If unspecified, each value is assigned the same weight. `u_weights` (resp. `v_weights`) must have the same length as `u_values` (resp. `v_values`). If the weight sum differs from 1, it must still be positive and finite so that the weights can be normalized to sum to 1. Returns ------- distance : float The computed distance between the distributions. Notes ----- The first Wasserstein distance between the distributions :math:`u` and :math:`v` is: .. math:: l_1 (u, v) = \inf_{\pi \in \Gamma (u, v)} \int_{\mathbb{R} \times \mathbb{R}} |x-y| \mathrm{d} \pi (x, y) where :math:`\Gamma (u, v)` is the set of (probability) distributions on :math:`\mathbb{R} \times \mathbb{R}` whose marginals are :math:`u` and :math:`v` on the first and second factors respectively. If :math:`U` and :math:`V` are the respective CDFs of :math:`u` and :math:`v`, this distance also equals to: .. math:: l_1(u, v) = \int_{-\infty}^{+\infty} |U-V| See [2]_ for a proof of the equivalence of both definitions. The input distributions can be empirical, therefore coming from samples whose values are effectively inputs of the function, or they can be seen as generalized functions, in which case they are weighted sums of Dirac delta functions located at the specified values. References ---------- .. [1] "Wasserstein metric", https://en.wikipedia.org/wiki/Wasserstein_metric .. [2] Ramdas, Garcia, Cuturi "On Wasserstein Two Sample Testing and Related Families of Nonparametric Tests" (2015). :arXiv:`1509.02237`. Examples -------- >>> from scipy.stats import wasserstein_distance >>> wasserstein_distance([0, 1, 3], [5, 6, 8]) 5.0 >>> wasserstein_distance([0, 1], [0, 1], [3, 1], [2, 2]) 0.25 >>> wasserstein_distance([3.4, 3.9, 7.5, 7.8], [4.5, 1.4], ... [1.4, 0.9, 3.1, 7.2], [3.2, 3.5]) 4.0781331438047861 """ return _cdf_distance(1, u_values, v_values, u_weights, v_weights) def energy_distance(u_values, v_values, u_weights=None, v_weights=None): r""" Compute the energy distance between two 1D distributions. .. versionadded:: 1.0.0 Parameters ---------- u_values, v_values : array_like Values observed in the (empirical) distribution. u_weights, v_weights : array_like, optional Weight for each value. If unspecified, each value is assigned the same weight. `u_weights` (resp. `v_weights`) must have the same length as `u_values` (resp. `v_values`). If the weight sum differs from 1, it must still be positive and finite so that the weights can be normalized to sum to 1. Returns ------- distance : float The computed distance between the distributions. Notes ----- The energy distance between two distributions :math:`u` and :math:`v`, whose respective CDFs are :math:`U` and :math:`V`, equals to: .. math:: D(u, v) = \left( 2\mathbb E|X - Y| - \mathbb E|X - X'| - \mathbb E|Y - Y'| \right)^{1/2} where :math:`X` and :math:`X'` (resp. :math:`Y` and :math:`Y'`) are independent random variables whose probability distribution is :math:`u` (resp. :math:`v`). As shown in [2]_, for one-dimensional real-valued variables, the energy distance is linked to the non-distribution-free version of the Cramer-von Mises distance: .. math:: D(u, v) = \sqrt{2} l_2(u, v) = \left( 2 \int_{-\infty}^{+\infty} (U-V)^2 \right)^{1/2} Note that the common Cramer-von Mises criterion uses the distribution-free version of the distance. See [2]_ (section 2), for more details about both versions of the distance. 
The input distributions can be empirical, therefore coming from samples whose values are effectively inputs of the function, or they can be seen as generalized functions, in which case they are weighted sums of Dirac delta functions located at the specified values. References ---------- .. [1] "Energy distance", https://en.wikipedia.org/wiki/Energy_distance .. [2] Szekely "E-statistics: The energy of statistical samples." Bowling Green State University, Department of Mathematics and Statistics, Technical Report 02-16 (2002). .. [3] Rizzo, Szekely "Energy distance." Wiley Interdisciplinary Reviews: Computational Statistics, 8(1):27-38 (2015). .. [4] Bellemare, Danihelka, Dabney, Mohamed, Lakshminarayanan, Hoyer, Munos "The Cramer Distance as a Solution to Biased Wasserstein Gradients" (2017). :arXiv:`1705.10743`. Examples -------- >>> from scipy.stats import energy_distance >>> energy_distance([0], [2]) 2.0000000000000004 >>> energy_distance([0, 8], [0, 8], [3, 1], [2, 2]) 1.0000000000000002 >>> energy_distance([0.7, 7.4, 2.4, 6.8], [1.4, 8. ], ... [2.1, 4.2, 7.4, 8. ], [7.6, 8.8]) 0.88003340976158217 """ return np.sqrt(2) * _cdf_distance(2, u_values, v_values, u_weights, v_weights) def _cdf_distance(p, u_values, v_values, u_weights=None, v_weights=None): r""" Compute, between two one-dimensional distributions :math:`u` and :math:`v`, whose respective CDFs are :math:`U` and :math:`V`, the statistical distance that is defined as: .. math:: l_p(u, v) = \left( \int_{-\infty}^{+\infty} |U-V|^p \right)^{1/p} p is a positive parameter; p = 1 gives the Wasserstein distance, p = 2 gives the energy distance. Parameters ---------- u_values, v_values : array_like Values observed in the (empirical) distribution. u_weights, v_weights : array_like, optional Weight for each value. If unspecified, each value is assigned the same weight. `u_weights` (resp. `v_weights`) must have the same length as `u_values` (resp. `v_values`). If the weight sum differs from 1, it must still be positive and finite so that the weights can be normalized to sum to 1. Returns ------- distance : float The computed distance between the distributions. Notes ----- The input distributions can be empirical, therefore coming from samples whose values are effectively inputs of the function, or they can be seen as generalized functions, in which case they are weighted sums of Dirac delta functions located at the specified values. References ---------- .. [1] Bellemare, Danihelka, Dabney, Mohamed, Lakshminarayanan, Hoyer, Munos "The Cramer Distance as a Solution to Biased Wasserstein Gradients" (2017). :arXiv:`1705.10743`. """ u_values, u_weights = _validate_distribution(u_values, u_weights) v_values, v_weights = _validate_distribution(v_values, v_weights) u_sorter = np.argsort(u_values) v_sorter = np.argsort(v_values) all_values = np.concatenate((u_values, v_values)) all_values.sort(kind='mergesort') # Compute the differences between pairs of successive values of u and v. deltas = np.diff(all_values) # Get the respective positions of the values of u and v among the values of # both distributions. u_cdf_indices = u_values[u_sorter].searchsorted(all_values[:-1], 'right') v_cdf_indices = v_values[v_sorter].searchsorted(all_values[:-1], 'right') # Calculate the CDFs of u and v using their weights, if specified. 
if u_weights is None: u_cdf = u_cdf_indices / u_values.size else: u_sorted_cumweights = np.concatenate(([0], np.cumsum(u_weights[u_sorter]))) u_cdf = u_sorted_cumweights[u_cdf_indices] / u_sorted_cumweights[-1] if v_weights is None: v_cdf = v_cdf_indices / v_values.size else: v_sorted_cumweights = np.concatenate(([0], np.cumsum(v_weights[v_sorter]))) v_cdf = v_sorted_cumweights[v_cdf_indices] / v_sorted_cumweights[-1] # Compute the value of the integral based on the CDFs. # If p = 1 or p = 2, we avoid using np.power, which introduces an overhead # of about 15%. if p == 1: return np.sum(np.multiply(np.abs(u_cdf - v_cdf), deltas)) if p == 2: return np.sqrt(np.sum(np.multiply(np.square(u_cdf - v_cdf), deltas))) return np.power(np.sum(np.multiply(np.power(np.abs(u_cdf - v_cdf), p), deltas)), 1/p) def _validate_distribution(values, weights): """ Validate the values and weights from a distribution input of `cdf_distance` and return them as ndarray objects. Parameters ---------- values : array_like Values observed in the (empirical) distribution. weights : array_like Weight for each value. Returns ------- values : ndarray Values as ndarray. weights : ndarray Weights as ndarray. """ # Validate the value array. values = np.asarray(values, dtype=float) if len(values) == 0: raise ValueError("Distribution can't be empty.") # Validate the weight array, if specified. if weights is not None: weights = np.asarray(weights, dtype=float) if len(weights) != len(values): raise ValueError('Value and weight array-likes for the same ' 'empirical distribution must be of the same size.') if np.any(weights < 0): raise ValueError('All weights must be non-negative.') if not 0 < np.sum(weights) < np.inf: raise ValueError('Weight array-like sum must be positive and ' 'finite. Set as None for an equal distribution of ' 'weight.') return values, weights return values, None ##################################### # SUPPORT FUNCTIONS # ##################################### RepeatedResults = namedtuple('RepeatedResults', ('values', 'counts')) def find_repeats(arr): """ Find repeats and repeat counts. Parameters ---------- arr : array_like Input array. This is cast to float64. Returns ------- values : ndarray The unique values from the (flattened) input that are repeated. counts : ndarray Number of times the corresponding 'value' is repeated. Notes ----- In numpy >= 1.9 `numpy.unique` provides similar functionality. The main difference is that `find_repeats` only returns repeated values. Examples -------- >>> from scipy import stats >>> stats.find_repeats([2, 1, 2, 3, 2, 2, 5]) RepeatedResults(values=array([2.]), counts=array([4])) >>> stats.find_repeats([[10, 20, 1, 2], [5, 5, 4, 4]]) RepeatedResults(values=array([4., 5.]), counts=array([2, 2])) """ # Note: always copies. return RepeatedResults(*_find_repeats(np.array(arr, dtype=np.float64))) def _sum_of_squares(a, axis=0): """ Square each element of the input array, and return the sum(s) of that. Parameters ---------- a : array_like Input array. axis : int or None, optional Axis along which to calculate. Default is 0. If None, compute over the whole array `a`. Returns ------- sum_of_squares : ndarray The sum along the given axis for (a**2). See also -------- _square_of_sums : The square(s) of the sum(s) (the opposite of `_sum_of_squares`). """ a, axis = _chk_asarray(a, axis) return np.sum(a*a, axis) def _square_of_sums(a, axis=0): """ Sum elements of the input array, and return the square(s) of that sum. Parameters ---------- a : array_like Input array. 
axis : int or None, optional Axis along which to calculate. Default is 0. If None, compute over the whole array `a`. Returns ------- square_of_sums : float or ndarray The square of the sum over `axis`. See also -------- _sum_of_squares : The sum of squares (the opposite of `square_of_sums`). """ a, axis = _chk_asarray(a, axis) s = np.sum(a, axis) if not np.isscalar(s): return s.astype(float) * s else: return float(s) * s def rankdata(a, method='average'): """ Assign ranks to data, dealing with ties appropriately. Ranks begin at 1. The `method` argument controls how ranks are assigned to equal values. See [1]_ for further discussion of ranking methods. Parameters ---------- a : array_like The array of values to be ranked. The array is first flattened. method : str, optional The method used to assign ranks to tied elements. The options are 'average', 'min', 'max', 'dense' and 'ordinal'. 'average': The average of the ranks that would have been assigned to all the tied values is assigned to each value. 'min': The minimum of the ranks that would have been assigned to all the tied values is assigned to each value. (This is also referred to as "competition" ranking.) 'max': The maximum of the ranks that would have been assigned to all the tied values is assigned to each value. 'dense': Like 'min', but the rank of the next highest element is assigned the rank immediately after those assigned to the tied elements. 'ordinal': All values are given a distinct rank, corresponding to the order that the values occur in `a`. The default is 'average'. Returns ------- ranks : ndarray An array of length equal to the size of `a`, containing rank scores. References ---------- .. [1] "Ranking", https://en.wikipedia.org/wiki/Ranking Examples -------- >>> from scipy.stats import rankdata >>> rankdata([0, 2, 3, 2]) array([ 1. , 2.5, 4. , 2.5]) >>> rankdata([0, 2, 3, 2], method='min') array([ 1, 2, 4, 2]) >>> rankdata([0, 2, 3, 2], method='max') array([ 1, 3, 4, 3]) >>> rankdata([0, 2, 3, 2], method='dense') array([ 1, 2, 3, 2]) >>> rankdata([0, 2, 3, 2], method='ordinal') array([ 1, 2, 4, 3]) """ if method not in ('average', 'min', 'max', 'dense', 'ordinal'): raise ValueError('unknown method "{0}"'.format(method)) arr = np.ravel(np.asarray(a)) algo = 'mergesort' if method == 'ordinal' else 'quicksort' sorter = np.argsort(arr, kind=algo) inv = np.empty(sorter.size, dtype=np.intp) inv[sorter] = np.arange(sorter.size, dtype=np.intp) if method == 'ordinal': return inv + 1 arr = arr[sorter] obs = np.r_[True, arr[1:] != arr[:-1]] dense = obs.cumsum()[inv] if method == 'dense': return dense # cumulative counts of each unique value count = np.r_[np.nonzero(obs)[0], len(obs)] if method == 'max': return count[dense] if method == 'min': return count[dense - 1] + 1 # average method return .5 * (count[dense] + count[dense - 1] + 1)
jor-/scipy
scipy/stats/stats.py
Python
bsd-3-clause
233,671
[ "DIRAC" ]
1fb09185f55bd33a20b8f670a682e9d8fe0615d94efcecc27ca2328c90ffe671
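A minimal usage sketch for the public scipy.stats helpers implemented in the file above (combine_pvalues, wasserstein_distance, rankdata). The input values are illustrative only and are not taken from the source, except where they reproduce the docstring examples.

import numpy as np
from scipy import stats

# Combine p-values from independent tests; Fisher's is the default method,
# Stouffer's accepts per-study weights (the 3:1:1 weights here are made up).
pvals = np.array([0.01, 0.20, 0.40])
chi2_stat, p_fisher = stats.combine_pvalues(pvals, method='fisher')
z_stat, p_stouffer = stats.combine_pvalues(pvals, method='stouffer',
                                           weights=[3, 1, 1])

# First Wasserstein (earth mover's) distance between two 1-D samples.
d = stats.wasserstein_distance([0, 1, 3], [5, 6, 8])      # 5.0, as in the docstring

# Ranking with ties resolved by the 'min' ("competition") rule.
ranks = stats.rankdata([0, 2, 3, 2], method='min')         # [1, 2, 4, 2]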
# -*- coding: utf-8 -*- import sqlite3 class GalaxyDB: PLANET_TYPE_PLANET = 1 PLANET_TYPE_BASE = 5 def __init__(self): self._conn = sqlite3.connect('galaxy5.db') self._conn.row_factory = sqlite3.Row self._cur = self._conn.cursor() self._log_queries = False def close(self): self._cur.close() self._conn.close() del self._cur del self._conn def create_query(self, where_clause=None, sort_col=None, sort_order=None): q = 'SELECT g,s,p, \n' \ ' planet_id, planet_name, planet_type, planet_metal, planet_crystal, planet_destroyed, \n' \ ' luna_id, luna_name, luna_diameter, luna_destroyed, \n' \ ' user_id, user_name, user_rank, user_onlinetime, user_banned, user_ro, user_race, \n' \ ' ally_id, ally_name, ally_tag, ally_members \n' \ ' FROM planets' if where_clause is not None: q += ' \n' q += where_clause # sort, order q += '\n ORDER BY ' # fix invalid input if sort_order is not None: if sort_order not in ['asc', 'desc']: sort_order = None if sort_col is not None: if sort_col not in ['planet_name', 'planet_type', 'user_name', 'user_rank', 'ally_name', 'luna_name']: sort_col = None # append sorting if sort_col is not None: q += sort_col if sort_order is not None: q += ' ' q += sort_order q += ', ' q += 'g ASC, s ASC, p ASC' # by default, always sort by coords # log query if self._log_queries: try: with open('queries.log', mode='at', encoding='UTF-8') as f: f.write(q) f.write('\n') except IOError: pass return q @staticmethod def safe_int(val): if val is None: return 0 try: r = int(val) except ValueError: r = 0 return r @staticmethod def safe_str(val): if val is None: return '' return str(val) def _rows_to_res_list(self): rows_list = [] rows = self._cur.fetchall() for row in rows: r = dict() r['coords'] = '[{0}:{1}:{2}]'.format(row['g'], row['s'], row['p']) r['coords_link'] = '<a href="http://uni5.xnova.su/galaxy/{3}/{4}/" target="_blank">' \ '[{0}:{1}:{2}]</a>'.format(row['g'], row['s'], row['p'], row['g'], row['s']) r['planet_id'] = GalaxyDB.safe_int(row['planet_id']) r['planet_name'] = GalaxyDB.safe_str(row['planet_name']) r['planet_type'] = GalaxyDB.safe_int(row['planet_type']) r['user_id'] = GalaxyDB.safe_int(row['user_id']) r['user_name'] = GalaxyDB.safe_str(row['user_name']) r['user_rank'] = GalaxyDB.safe_int(row['user_rank']) r['user_onlinetime'] = GalaxyDB.safe_int(row['user_onlinetime']) r['user_banned'] = GalaxyDB.safe_int(row['user_banned']) r['user_ro'] = GalaxyDB.safe_int(row['user_ro']) # fix user name to include extra data user_flags = '' if r['user_ro'] > 0: user_flags += 'U' if r['user_banned'] > 0: user_flags += 'G' if r['user_onlinetime'] == 1: user_flags += 'i' if r['user_onlinetime'] == 2: user_flags += 'I' if user_flags != '': r['user_name'] += ' (' + user_flags + ')' # user race and race icon r['user_race'] = GalaxyDB.safe_int(row['user_race']) r['user_race_img'] = '<img border="0" src="css/icons/race{0}.png" width="18" />'.format(r['user_race']) r['ally_name'] = GalaxyDB.safe_str(row['ally_name']) r['ally_tag'] = GalaxyDB.safe_str(row['ally_tag']) r['ally_members'] = GalaxyDB.safe_int(row['ally_members']) # process ally info if r['ally_tag'] != r['ally_name']: r['ally_name'] += ' [{0}]'.format(r['ally_tag']) r['ally_name'] += ' ({0} тел)'.format(r['ally_members']) if r['ally_members'] == 0: r['ally_name'] = '' r['luna_name'] = GalaxyDB.safe_str(row['luna_name']) r['luna_diameter'] = GalaxyDB.safe_int(row['luna_diameter']) # process luna if (r['luna_name'] != '') and (r['luna_diameter'] > 0): r['luna_name'] += ' ({0})'.format(r['luna_diameter']) # process planet type (detect 
bases) if r['planet_type'] == GalaxyDB.PLANET_TYPE_BASE: r['planet_name'] += ' (base)' rows_list.append(r) res_dict = dict() res_dict['rows'] = rows_list return res_dict def query_like(self, col_name, value, sort_col=None, sort_order=None): if type(col_name) == str: where = 'WHERE ' + col_name + ' LIKE ?' params = (value, ) elif type(col_name) == list: where = 'WHERE' params = list() for col in col_name: where += ' ' where += col where += ' LIKE ? OR' params.append(value) where = where[0:-2] else: where = None params = None q = self.create_query(where, sort_col, sort_order) self._cur.execute(q, params) return self._rows_to_res_list() def query_inactives(self, user_flags, gal_ints, s_min, s_max, min_rank=0, sort_col=None, sort_order=None): user_where = '' gals_where = '' syss_where = '' rank_where = '' # user flags # user online time user_ot = '' if 'i' in user_flags: user_ot = 'user_onlinetime=1' if 'I' in user_flags: user_ot = 'user_onlinetime>0' user_where += user_ot # user banned or not banned, exlusively set if 'G' in user_flags: if user_where != '': user_where += ' AND ' user_where += 'user_banned>0' else: if user_where != '': user_where += ' AND ' user_where += 'user_banned=0' # user ro or not, exclusively if 'U' in user_flags: if user_where != '': user_where += ' AND ' user_where += 'user_ro>0' else: if user_where != '': user_where += ' AND ' user_where += 'user_ro=0' # galaxies if type(gal_ints) == list: gals_where = 'g IN (' for g in gal_ints: gals_where += '{0},'.format(g) gals_where = gals_where[0:-1] gals_where += ')' # systems if s_min <= s_max: syss_where = 's BETWEEN {0} AND {1}'.format(s_min, s_max) # rank if min_rank > 0: rank_where = ' AND (user_rank BETWEEN 1 AND {0})'.format(min_rank) # final WHERE clause where = 'WHERE ({0}) AND ({1}) AND ({2}) {3}'.format(user_where, gals_where, syss_where, rank_where) q = self.create_query(where, sort_col, sort_order) self._cur.execute(q) return self._rows_to_res_list() def query_planets_count(self, gal: int, sys_: int) -> int: self._cur.execute('SELECT COUNT(*) FROM planets WHERE g=? AND s=?', (gal, sys_)) rows = self._cur.fetchall() assert len(rows) == 1 assert len(rows[0]) == 1 return self.safe_int(rows[0][0]) def find_player_by_name(self, player_name: str) -> tuple: q = 'SELECT user_id, user_name FROM planets WHERE user_name LIKE ? LIMIT 1' self._cur.execute(q, (player_name, )) rows = self._cur.fetchall() if len(rows) == 1: return rows[0] return None def query_player_planets(self, player_name: str) -> list: q = 'SELECT g,s,p, planet_name, planet_type, luna_name, luna_diameter \n' \ ' FROM planets WHERE user_name=?' self._cur.execute(q, (player_name,)) ret = [] for row in self._cur.fetchall(): p = dict() p['g'] = GalaxyDB.safe_int(row['g']) p['s'] = GalaxyDB.safe_int(row['s']) p['p'] = GalaxyDB.safe_int(row['p']) p['planet_name'] = GalaxyDB.safe_str(row['planet_name']) p['planet_type'] = GalaxyDB.safe_int(row['planet_type']) p['luna_name'] = GalaxyDB.safe_str(row['luna_name']) p['luna_diameter'] = GalaxyDB.safe_int(row['luna_diameter']) ret.append(p) return ret
minlexx/xnova_galaxy_parser
xnova/galaxy_db.py
Python
gpl-3.0
8,937
[ "Galaxy" ]
c1235bb785485856e76683707ef1c3791132ac0939022ac3e3828399a0a83f6f
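A stripped-down sketch of the sqlite3 access pattern GalaxyDB uses above: a Row factory for name-based column access plus a parameterized LIKE query. The galaxy5.db file and the planets table are assumed to exist exactly as in the parser; the search string is a placeholder.

import sqlite3

conn = sqlite3.connect('galaxy5.db')
conn.row_factory = sqlite3.Row        # rows now support row['user_name'] access
cur = conn.cursor()

# Parameterized query, same style as GalaxyDB.query_like()
cur.execute('SELECT g, s, p, user_name FROM planets WHERE user_name LIKE ?',
            ('%admiral%',))
for row in cur.fetchall():
    print('[{0}:{1}:{2}] {3}'.format(row['g'], row['s'], row['p'],
                                     row['user_name']))

cur.close()
conn.close()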
#!/usr/bin/env python #-*- coding:utf-8 -*- # vim:ai:sta:et:ts=4:sw=4:sts=4 """kernelng 0.x Tool for maintaining customized overlays of kernel-ng.eclass-based ebuilds Copyright 2005-2014 Gentoo Foundation Copyright (C) 2005 Colin Kingsley <tercel@gentoo.org> Copyright (C) 2008 Zac Medico <zmedico@gentoo.org> Copyright (C) 2009 Sebastian Pipping <sebastian@pipping.org> Copyright (C) 2009 Christian Ruppert <idl0r@gentoo.org> Copyright (C) 2012 Brian Dolbec <dolsen@gentoo.org> Copyright (C) 2014 Gregory M. Turner <gmt@be-evil.net> Distributed under the terms of the GNU General Public License v2 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. """ import signal import sys import hashlib import subprocess from kernelng.output import encoder, get_encoding, decode_selection class Interactive(object): """Handles interactive features selection.""" def __init__(self, features, options, output): self.output = output self.features = [] self.interactive(features, options) self.output.write('Interactive.interactive(): self.features = %s\n' % self.features, 2) if not self.features or len(self.features[0]) == 0: sys.exit(1) def interactive(self, features, options): """ Some sort of interactive menu thingy. """ dialog = ['dialog', '--separate-output', '--stdout', '--title', '"Select kernel feature sets:"', '--checklist', '"Please select your desired features:'] dialog.extend(['20', '110', '14']) for (feature, args) in sorted(features, key = lambda x: x[1]['feature'].lower()): dialog.extend([ "%s" % feature, "%s" % args['description'], "OFF" ]) dialog = [encoder(x, get_encoding(sys.stdout)) for x in dialog] proc = subprocess.Popen( dialog, stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = proc.communicate() self.features = out.splitlines() if self.features: if hasattr(self.features[0], 'decode'): self.features = decode_selection( [x.decode('utf-8').rstrip() for x in self.features]) else: self.features= decode_selection([x.rstrip() for x in self.features])
gmt/kernel-ng-util
kernelng/selectors.py
Python
gpl-2.0
2,879
[ "Brian" ]
8ea0abb2f716894e9e7070c072146b77fccf68668ddd53b67c50f02ca4253bf4
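A rough, self-contained sketch of the interactive checklist pattern selectors.py builds on above: run the external dialog utility and read the selected tags from its stdout. The dialog binary must be installed; the two feature entries are placeholders, not real kernel feature sets.

import subprocess

cmd = ['dialog', '--separate-output', '--stdout',
       '--checklist', 'Select kernel feature sets:', '20', '70', '5',
       'feat_a', 'first placeholder feature', 'OFF',
       'feat_b', 'second placeholder feature', 'OFF']

proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, _err = proc.communicate()

# One selected tag per line on stdout because of --separate-output
selected = [line.decode('utf-8').strip() for line in out.splitlines()]
print(selected)        # e.g. ['feat_a'] if only the first box was ticked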
#!/usr/bin/env python ''' PortScanner.py (c) 2017 Luca Conterio This file is part of PortScanner.py. PortScanner.py is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. PortScanner.py is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with PortScanner.py. If not, see <http://www.gnu.org/licenses/>. ''' import logging logging.getLogger("scapy.runtime").setLevel(logging.ERROR) from scapy.all import * import socket import optparse import os from scanFunctions import * from results import * def determinePorts(ports): maxPortsNumber = 1000 if ports == None: ports = defaultPorts # CASO PER LA SCANSIONE DELLE PORTE PIU' COMUNI elif "/" in str(ports): # CASO IN CUI E' SPECIFICATO UN RANGE DI PORTE ports = ports.split("/") if int(ports[0]) < 0 or int(ports[1]) < 0 or int(ports[0]) > 65535 or int(ports[1]) > 65535: # CONTROLLO VALIDITA' VALORI PORTE print "\n\033[31;1m [!] ERROR: invalid port value(s) given\033[0m \n" return -1 if int(ports[0]) > int(ports[1]): # SE LA PRIMA PORTA SPECIFICATA NEL RANGE E' MAGGIORE DELLA SECONDA, LE SCAMBIO temp = ports[1] ports[1] = ports[0] ports[0] = temp ports = range(int(ports[0]), int(ports[1]) + 1) # ASSEGNO ALLA LISTA DELLE PORTE, TUTTI I NUMERI CONPRESI FRA I DUE VALORI DI PORTE SPECIFICATE if int(ports[len(ports)- 1]) - int(ports[0]) > maxPortsNumber: # CONTROLLO NUMERO DI PORTE: se troppo alto exit() ---> max 5000 altrimenti troppo lento parser.print_help() print "\n\033[31;1m [!] ERROR: too many target ports given, maximum is 1000 else the script would slow down too much!\n \033[0m" return -1 else: ports = ports.split(",") # CASO IN CUI SONO SPECIFICATE SINGOLE PORTE for p in ports: if int(p) > 65535 or int(p) < 0: # CONTROLLO VALIDITA' VALORI PORTE print "\n\033[31;1m [!] 
ERROR: invalid port value(s) given\033[0m \n" return -1 return ports # ================================= M A I N ===================================================================================== colors = {"REDbold":"\033[31;1m", "RED":"\033[31m", "BLUE":"\033[34;1m", "BLUEbold":"\033[34m", "GREENbold":"\033[32;1m", "GREEN":"\033[32m", "WHITEbold":"\033[37;1m", "WHITE":"\033[37m", "DEFAULT":"\033[0m"} scanTypesList = ["syn", "SYN", "s", "S", "fin", "FIN", "f", "F", "ack", "ACK", "a", "A", \ "null", "NULL", "n", "N", "XMAS", "xmas", "x", "X", "UDP", "udp", "u", "U"] os.system("clear") print colors["REDbold"] + "\n ( ( " print " )\ ) ))\ ) " print " (()/( ( ( /(()/( ) ( ( ( " print " /( ))( )( )\())(_)) ( ( /( ( ( ))\ )( () )\ ) " print " (_)" + colors["WHITEbold"] + "_" + colors["REDbold"] + "( )\(()\(_))(_))" + colors["WHITEbold"] + "_" + colors["REDbold"] + " )\ )(_)) )\ ) )\ ) /((_|()\ /(/( (()/( " print colors["WHITEbold"] + " | _ \_" + colors["REDbold"] + "((_)((_" + colors["WHITEbold"] + ") |/ ___|" + colors["REDbold"] + "((_|(_)_ _(_/( _(_/((_)) ((_) ((_)_\ )(_)) " print colors["WHITEbold"] + " | _/ _ \ '_| _\__ \/ _|/ _` | ' \ | ' \ | -_)| '_| _ | '_ \) || | " print " |_| \___/_| \__|___/\__|\__,_|_||_||_||_|\___||_| (_) | .__/ \_, | " print " |_| |__/ " print colors["DEFAULT"] + " [ Copyright (c) 2017 Luca Conterio - Version 1.3 ] " print " [ For more info visit: https://github.com/luca-conterio/PortScanner.py ] \n" time.sleep(1) timeout = 1 try: parser = optparse.OptionParser("\n python %prog -t <targetHost> [ -p <targetPort(s)> ] [ --type <scanType> ] \n\n Ctrl+C to close program") parser.add_option("--type", dest="type", type="string", help="Specify scan type you want to perform: it can be SYN, FIN, ACK or NULL scan. If it is not specified, default Tcp SYN scan (stealth scan) will be performed. \n") parser.add_option("-p", "--port", dest="ports", type="string", help="Specify port(s) to be scanned! You can specify a single port, a list of ports separating them by a comma (\",\") or a range of ports sparating the first and the last one by a slash (\"/\", max 1000 for range). If this parameter is missing, the default list of ports will be scanned: you can find the default list in the file \"/data/port_ervices.txt\" \n") parser.add_option("-t", "--target", dest="target", type="string", help="Specify target host! if you specify an IP address with a netmask (es. 155.98.2.230/20) the whole network will be scanned. \n") (options, args) = parser.parse_args() start = time.time() scantype = options.type tgthost = options.target tgtports = options.ports if scantype == None: scantype = "syn" # default scan type if not specified differently elif scantype not in scanTypesList: print colors["REDbold"] + " [!] ERROR: specified scan type not supported!\n" + colors["DEFAULT"] parser.print_help() exit(0) if tgthost == None: parser.print_help() print colors["REDbold"] + "\n [!] 
ERROR: please specify a target!\n" + colors["DEFAULT"] exit(0) else: try: hostname = socket.gethostbyaddr(tgthost)[0] if ".station" in hostname: hostname = hostname[:-8] except: hostname = tgthost if "/" in tgthost: # network report: scan for online hosts hostsUp, hostsDown, broadcast = NETWORKscan(tgthost) end = time.time() processing_time = end - start printResultsForAllHosts(tgthost, hostsUp, hostsDown, processing_time, broadcast) else: # scan single target host tgtports = determinePorts(tgtports) # create ports list ---> it returns -1 if there's an error in parameters if tgtports == -1: exit(0) # it means there's an error in parameters #if tgthost in myIPaddresses: # hostIsUp = True # if the target is my device, then it must obviously be up # latency = 0 #else: hostIsUp, latency = pingHost(tgthost) # check is the target host is UP or DOWN ---> returns a Boolean value and a latency (double) value if hostIsUp == True: print colors["GREENbold"] + " [+] Target host is up! \n" + colors["DEFAULT"] time.sleep(1) if scantype == "udp" or scantype == "UDP" or scantype == "u" or scantype == "U": createDefaultPortsList("udp") portsState, openPorts, closedPorts = UDPscan(tgthost, hostname, tgtports, timeout) else: createDefaultPortsList("tcp") if scantype == "syn" or scantype == "SYN" or scantype == "s" or scantype == "S": portsState, openPorts, closedPorts = SYNscan(tgthost, hostname, tgtports, timeout) elif scantype == "fin" or scantype == "FIN" or scantype == "f" or scantype == "F" or scantype == "null" or scantype == "NULL" \ or scantype == "n" or scantype == "N" or scantype == "xmas" or scantype == "XMAS" or scantype == "x" or scantype == "X": portsState, openPorts, closedPorts = FNXscan(scantype, tgthost, hostname, tgtports, timeout) # FIN, NULL and XMAS scans manage the same results so i use # the same function for them elif scantype == "ack" or scantype == "ACK" or scantype == "a" or scantype == "A": portsState, unfilteredPorts, filteredPorts = ACKscan(tgthost, hostname, tgtports, timeout) end = time.time() processing_time = end - start if scantype == "ack" or scantype == "ACK": printResultsForACKscan(tgthost, hostname, tgtports, portsState, unfilteredPorts, filteredPorts, latency, processing_time) elif scantype == "udp" or scantype == "UDP": printResults("udp", tgthost, hostname, tgtports, portsState, openPorts, closedPorts, latency, processing_time) else: printResults("tcp", tgthost, hostname, tgtports, portsState, openPorts, closedPorts, latency, processing_time) elif hostIsUp == False: print colors["REDbold"] + " [x] Host " + tgthost + " seems to be down!\n" + colors["DEFAULT"] except KeyboardInterrupt: end = time.time() processing_time = end - start # calcolo tempo trascorso fra start e il keyboard interrupt print "\n\n\n [*] KeyboardInterrupt: closing script...\n [*] Elapsed time: %.2f \n" % (processing_time)
luca-conterio/PortScanner.py
PortScanner.py
Python
gpl-3.0
9,316
[ "VisIt" ]
31b03eb84326331ed533c6cc92b8727d217492d8649341a9ce7fc8c21784fac5
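A bare-bones sketch of the single-port TCP SYN probe that the SYNscan() call in PortScanner.py is built around. It needs scapy and normally root privileges; the target address and port are documentation-range placeholders, and this is not the script's own implementation.

from scapy.all import IP, TCP, sr1, send

target, port = '192.0.2.10', 80            # placeholder target
probe = IP(dst=target) / TCP(dport=port, flags='S')
resp = sr1(probe, timeout=1, verbose=0)

if resp is None:
    print('no answer (filtered or host down)')
elif resp.haslayer(TCP) and (resp[TCP].flags & 0x12) == 0x12:
    print('open (SYN/ACK received)')
    # reset the half-open connection so it is not left dangling
    send(IP(dst=target) / TCP(dport=port, flags='R'), verbose=0)
else:
    print('closed (RST or unexpected reply)')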
# -*- coding: utf-8 -*- # # DiracDocs documentation build configuration file, created by # sphinx-quickstart on Sun Apr 25 17:34:37 2010. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import datetime import os import sys import subprocess sys.path.insert(0, ".") try: import fakeEnvironment except ImportError: pass try: import fakeEnv except ImportError: pass diracRelease = os.environ.get( 'DIRACVERSION', 'integration' ) if os.environ.get('READTHEDOCS') == 'True': diracRelease = os.path.basename( os.path.abspath( "../../" ) ) if diracRelease.startswith("rel-"): diracRelease = diracRelease[4:] print 'conf.py: %s as DIRACVERSION' % diracRelease #............................................................................... # configuration # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. if os.environ.get('READTHEDOCS') == 'True': sys.path.append(os.path.abspath('.')) diracPath = os.path.abspath( os.path.join( os.getcwd(), "../..") ) print "DiracPath",diracPath buildfolder ="_build" try: os.mkdir( os.path.abspath( "../"+buildfolder) ) except: pass ##We need to have the DIRAC module somewhere, or we cannot import it, as readtheDocs clones the repo into something based on the branchname if not os.path.exists( "../../DIRAC" ): diracLink = os.path.abspath( os.path.join( os.getcwd() , "../" , buildfolder, "DIRAC" ) ) print "DiracLink",diracLink if not os.path.exists( diracLink ): RES = subprocess.check_output( ["ln","-s", diracPath, diracLink ] ) diracPath = os.path.abspath( os.path.join( diracLink, ".." ) ) sys.path.insert(0, diracPath) for path in sys.path: os.environ['PYTHONPATH'] = os.environ.get('PYTHONPATH', '')+":"+path ## this is not working at the moment because the DIRAC folder is not found by the buildScriptsDOC script # print "Pythonpath",os.environ['PYTHONPATH'] # buildCommand = os.path.join( os.getcwd() , "../Tools/buildScriptsDOC.py" ) # scriptdir = os.path.abspath(os.path.join( os.getcwd() , "../", buildfolder, "scripts" )) # try: # os.mkdir( scriptdir ) # except: # pass # print "command", buildCommand # code = subprocess.Popen( ["python", buildCommand, scriptdir ], env = os.environ, stdout=subprocess.PIPE, stderr=subprocess.PIPE) # stdout , err = code.communicate() # print "script",stdout # print "script",err os.environ["DIRAC"] = diracPath print "DIRAC ENVIRON", os.environ["DIRAC"] ##singlehtml build needs too much memory, so we need to create less code documentation buildtype = "limited" if any("singlehtml" in arg for arg in sys.argv ) else "full" print "Chosing build type:", buildtype buildCommand =os.path.join( os.getcwd() , "../Tools/MakeDoc.py" ) code = subprocess.Popen( ["python",buildCommand, buildtype], env = os.environ, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout , err = code.communicate() print "code",stdout print "code",err # -- General configuration ----------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.autosummary', 'sphinx.ext.intersphinx', 'sphinx.ext.napoleon', 'sphinx.ext.graphviz', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8' # The master toctree document. master_doc = 'index' # General information about the project. project = u'DIRAC' copyright = u'%s, DIRAC Project' % datetime.datetime.utcnow().year # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '' # The full version, including alpha/beta/rc tags. release = diracRelease # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. today_fmt = '%H:%M %d/%m/%Y %Z' # List of documents that shouldn't be included in the build. #unused_docs = [] # List of directories, relative to source directory, that shouldn't be searched # for source files. #ADRI: Ignore old stuff that is not included in the compilation exclude_trees = [ 'AdministratorGuide/Configuration/ConfigurationReference' ] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. html_theme = 'nature' html_style = 'dirac.css' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = { # 'sidebarbgcolor':'#D5E2F2' #} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". html_title = "DIRAC Documentation" # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = '_static/DIRAC-logo.png' # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. html_favicon = '_static/favicon.ico' # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". 
html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. html_last_updated_fmt = '%d/%m/%Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_use_modindex = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = '' # Output file base name for HTML help builder. htmlhelp_basename = 'DiracDocsdoc' # -- Options for LaTeX output -------------------------------------------------- # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'DiracDocs.tex', u'DIRAC Documentation', u'DIRAC Project.', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # Additional stuff for the LaTeX preamble. #latex_preamble = '' # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_use_modindex = True ## link with the python standard library docs intersphinx_mapping = { 'python': ('https://docs.python.org/2.7', None), } #............................................................................... #EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
arrabito/DIRAC
docs/source/conf.py
Python
gpl-3.0
9,724
[ "DIRAC" ]
3a40b9febcc962ab89043dcead1e2fc86a63d87d6c16084d587cc58994d1f1cd
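A hedged alternative for the "ln -s" step in the conf.py above: the same DIRAC symlink can be created with os.symlink instead of shelling out, which drops the subprocess call. The paths mirror the ones computed in conf.py and are assumptions here, not part of the original file.

import os

buildfolder = '_build'
dirac_path = os.path.abspath(os.path.join(os.getcwd(), '..', '..'))
dirac_link = os.path.abspath(os.path.join(os.getcwd(), '..', buildfolder, 'DIRAC'))

link_dir = os.path.dirname(dirac_link)
if not os.path.isdir(link_dir):
    os.makedirs(link_dir)                # conf.py creates ../_build the same way
if not os.path.exists(dirac_link):
    os.symlink(dirac_path, dirac_link)   # replaces check_output(["ln", "-s", ...])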
import hashlib import json import yaml from urllib import urlencode from twisted.internet.defer import ( inlineCallbacks, DeferredQueue, returnValue, gatherResults) from twisted.internet import task, reactor from twisted.web import http from twisted.web.server import NOT_DONE_YET from twisted.trial.unittest import SkipTest from vumi.tests.helpers import VumiTestCase from vumi.tests.utils import MockHttpServer, LogCatcher from vumi.transports.tests.helpers import TransportHelper from vumi.transports.wechat import WeChatTransport from vumi.transports.wechat.errors import WeChatApiException from vumi.transports.wechat.message_types import ( WeChatXMLParser, TextMessage) from vumi.utils import http_request_full from vumi.message import TransportUserMessage from vumi.persist.fake_redis import FakeRedis def request(transport, method, path='', params={}, data=None): addr = transport.server.getHost() token = transport.get_static_config().auth_token nonce = '1234' timestamp = '2014-01-01T00:00:00' good_signature = hashlib.sha1( ''.join(sorted([timestamp, nonce, token]))).hexdigest() default_params = { 'signature': good_signature, 'timestamp': timestamp, 'nonce': nonce, } default_params.update(params) path += '?%s' % (urlencode(default_params),) url = 'http://%s:%s%s%s' % ( addr.host, addr.port, transport.get_static_config().web_path, path) return http_request_full(url, method=method, data=data) class WeChatTestCase(VumiTestCase): def setUp(self): self.tx_helper = self.add_helper(TransportHelper(WeChatTransport)) self.request_queue = DeferredQueue() self.mock_server = MockHttpServer(self.handle_api_request) self.add_cleanup(self.mock_server.stop) return self.mock_server.start() def handle_api_request(self, request): self.request_queue.put(request) return NOT_DONE_YET def get_transport(self, **config): defaults = { 'api_url': self.mock_server.url, 'auth_token': 'token', 'twisted_endpoint': 'tcp:0', 'wechat_appid': 'appid', 'wechat_secret': 'secret', 'embed_user_profile': False, } defaults.update(config) return self.tx_helper.get_transport(defaults) @inlineCallbacks def get_transport_with_access_token(self, access_token, **config): transport = yield self.get_transport(**config) yield transport.redis.set(WeChatTransport.ACCESS_TOKEN_KEY, access_token) returnValue(transport) class TestWeChatInboundMessaging(WeChatTestCase): @inlineCallbacks def test_auth_success(self): transport = yield self.get_transport() resp = yield request( transport, "GET", params={ 'echostr': 'success' }) self.assertEqual(resp.delivered_body, 'success') @inlineCallbacks def test_auth_fail(self): transport = yield self.get_transport_with_access_token('foo') resp = yield request( transport, "GET", params={ 'signature': 'foo', 'echostr': 'success' }) self.assertNotEqual(resp.delivered_body, 'success') @inlineCallbacks def test_inbound_text_message(self): transport = yield self.get_transport_with_access_token('foo') resp_d = request( transport, 'POST', data=""" <xml> <ToUserName><![CDATA[toUser]]></ToUserName> <FromUserName><![CDATA[fromUser]]></FromUserName> <CreateTime>1348831860</CreateTime> <MsgType><![CDATA[text]]></MsgType> <Content><![CDATA[this is a test]]></Content> <MsgId>1234567890123456</MsgId> </xml> """.strip()) [msg] = yield self.tx_helper.wait_for_dispatched_inbound(1) reply_msg = yield self.tx_helper.make_dispatch_reply( msg, 'foo') resp = yield resp_d reply = WeChatXMLParser.parse(resp.delivered_body) self.assertEqual(reply.to_user_name, 'fromUser') self.assertEqual(reply.from_user_name, 'toUser') 
self.assertTrue(int(reply.create_time) > 1348831860) self.assertTrue(isinstance(reply, TextMessage)) [ack] = yield self.tx_helper.wait_for_dispatched_events(1) self.assertEqual(ack['event_type'], 'ack') self.assertEqual(ack['user_message_id'], reply_msg['message_id']) self.assertEqual(ack['sent_message_id'], reply_msg['message_id']) @inlineCallbacks def test_inbound_event_subscribe_message(self): transport = yield self.get_transport_with_access_token('foo') resp = yield request( transport, 'POST', data=""" <xml> <ToUserName> <![CDATA[toUser]]> </ToUserName> <FromUserName> <![CDATA[fromUser]]> </FromUserName> <CreateTime>1395130515</CreateTime> <MsgType> <![CDATA[event]]> </MsgType> <Event> <![CDATA[subscribe]]> </Event> <EventKey><![CDATA[]]></EventKey> </xml> """) self.assertEqual(resp.code, http.OK) [msg] = yield self.tx_helper.wait_for_dispatched_inbound(1) self.assertEqual( msg['session_event'], TransportUserMessage.SESSION_NEW) self.assertEqual(msg['transport_metadata'], { 'wechat': { 'Event': 'subscribe', 'EventKey': '', 'FromUserName': 'fromUser', 'MsgType': 'event', 'ToUserName': 'toUser' } }) @inlineCallbacks def test_inbound_menu_event_click_message(self): transport = yield self.get_transport_with_access_token('foo') resp = yield request( transport, 'POST', data=""" <xml> <ToUserName><![CDATA[toUser]]></ToUserName> <FromUserName><![CDATA[fromUser]]></FromUserName> <CreateTime>123456789</CreateTime> <MsgType><![CDATA[event]]></MsgType> <Event><![CDATA[CLICK]]></Event> <EventKey><![CDATA[EVENTKEY]]></EventKey> </xml> """.strip()) self.assertEqual(resp.code, http.OK) [msg] = yield self.tx_helper.wait_for_dispatched_inbound(1) self.assertEqual( msg['session_event'], TransportUserMessage.SESSION_NEW) self.assertEqual(msg['transport_metadata'], { 'wechat': { 'Event': 'CLICK', 'EventKey': 'EVENTKEY', 'FromUserName': 'fromUser', 'MsgType': 'event', 'ToUserName': 'toUser' } }) self.assertEqual(msg['to_addr'], 'toUser@EVENTKEY') @inlineCallbacks def test_inbound_menu_event_view_message(self): transport = yield self.get_transport_with_access_token('foo') with LogCatcher() as lc: resp = yield request( transport, 'POST', data=""" <xml> <ToUserName><![CDATA[toUser]]></ToUserName> <FromUserName><![CDATA[fromUser]]></FromUserName> <CreateTime>123456789</CreateTime> <MsgType><![CDATA[event]]></MsgType> <Event><![CDATA[VIEW]]></Event> <EventKey><![CDATA[http://www.gotvafrica.com/mobi/home.aspx]]></EventKey> </xml> """.strip()) self.assertEqual(resp.code, http.OK) [] = self.tx_helper.get_dispatched_inbound() msg = lc.messages()[0] self.assertEqual( msg, 'fromUser clicked on http://www.gotvafrica.com/mobi/home.aspx') @inlineCallbacks def test_unsupported_message_type(self): transport = yield self.get_transport_with_access_token('foo') response = yield request( transport, 'POST', data=""" <xml> <ToUserName><![CDATA[toUser]]></ToUserName> <FromUserName><![CDATA[fromUser]]></FromUserName> <CreateTime>1348831860</CreateTime> <MsgType><![CDATA[THIS_IS_UNSUPPORTED]]></MsgType> <Content><![CDATA[this is a test]]></Content> <MsgId>1234567890123456</MsgId> </xml> """.strip()) self.assertEqual( response.code, http.BAD_REQUEST) self.assertEqual( response.delivered_body, "Unsupported MsgType: THIS_IS_UNSUPPORTED") self.assertEqual( [], self.tx_helper.get_dispatched_inbound()) class TestWeChatOutboundMessaging(WeChatTestCase): def dispatch_push_message(self, content, wechat_md, **kwargs): helper_metadata = kwargs.get('helper_metadata', {}) wechat_metadata = helper_metadata.setdefault('wechat', {}) 
wechat_metadata.update(wechat_md) return self.tx_helper.make_dispatch_outbound( content, helper_metadata=helper_metadata, **kwargs) @inlineCallbacks def test_ack_push_text_message(self): yield self.get_transport_with_access_token('foo') msg_d = self.dispatch_push_message('foo', {}, to_addr='toaddr') request = yield self.request_queue.get() self.assertEqual(request.path, '/message/custom/send') self.assertEqual(request.args, { 'access_token': ['foo'] }) self.assertEqual(json.load(request.content), { 'touser': 'toaddr', 'msgtype': 'text', 'text': { 'content': 'foo' } }) request.finish() [ack] = yield self.tx_helper.wait_for_dispatched_events(1) msg = yield msg_d self.assertEqual(ack['event_type'], 'ack') self.assertEqual(ack['user_message_id'], msg['message_id']) @inlineCallbacks def test_nack_push_text_message(self): yield self.get_transport_with_access_token('foo') msg_d = self.dispatch_push_message('foo', {}) # fail the API request request = yield self.request_queue.get() request.setResponseCode(http.BAD_REQUEST) request.finish() msg = yield msg_d [nack] = yield self.tx_helper.wait_for_dispatched_events(1) self.assertEqual( nack['user_message_id'], msg['message_id']) self.assertEqual(nack['event_type'], 'nack') self.assertEqual(nack['nack_reason'], 'Received status code: 400') @inlineCallbacks def test_ack_push_inferred_news_message(self): yield self.get_transport_with_access_token('foo') # news is a collection or URLs apparently content = ('This is an awesome link for you! http://www.wechat.com/ ' 'Go visit it.') msg_d = self.dispatch_push_message( content, {}, to_addr='toaddr') request = yield self.request_queue.get() self.assertEqual(request.path, '/message/custom/send') self.assertEqual(request.args, { 'access_token': ['foo'] }) self.assertEqual(json.load(request.content), { 'touser': 'toaddr', 'msgtype': 'news', 'news': { 'articles': [ { 'title': 'This is an awesome link for you! 
', 'url': 'http://www.wechat.com/', 'description': content, } ] } }) request.finish() [ack] = yield self.tx_helper.wait_for_dispatched_events(1) msg = yield msg_d self.assertEqual(ack['event_type'], 'ack') self.assertEqual(ack['user_message_id'], msg['message_id']) class TestWeChatAccessToken(WeChatTestCase): @inlineCallbacks def test_request_new_access_token(self): transport = yield self.get_transport() config = transport.get_static_config() d = transport.request_new_access_token() req = yield self.request_queue.get() self.assertEqual(req.path, '/token') self.assertEqual(req.args, { 'grant_type': ['client_credential'], 'appid': [config.wechat_appid], 'secret': [config.wechat_secret], }) req.write(json.dumps({ 'access_token': 'the_access_token', 'expires_in': 7200 })) req.finish() access_token = yield d self.assertEqual(access_token, 'the_access_token') cached_token = yield transport.redis.get( WeChatTransport.ACCESS_TOKEN_KEY) self.assertEqual(cached_token, 'the_access_token') expiry = yield transport.redis.ttl(WeChatTransport.ACCESS_TOKEN_KEY) self.assertTrue(int(7200 * 0.8) < expiry <= int(7200 * 0.9)) @inlineCallbacks def test_get_cached_access_token(self): transport = yield self.get_transport() yield transport.redis.set(WeChatTransport.ACCESS_TOKEN_KEY, 'foo') access_token = yield transport.get_access_token() self.assertEqual(access_token, 'foo') # Empty request queue means no WeChat API calls were made self.assertEqual(self.request_queue.size, None) class TestWeChatAddrMasking(WeChatTestCase): @inlineCallbacks def test_default_mask(self): transport = yield self.get_transport_with_access_token('foo') resp_d = request( transport, 'POST', data=""" <xml> <ToUserName><![CDATA[toUser]]></ToUserName> <FromUserName><![CDATA[fromUser]]></FromUserName> <CreateTime>1348831860</CreateTime> <MsgType><![CDATA[text]]></MsgType> <Content><![CDATA[this is a test]]></Content> <MsgId>1234567890123456</MsgId> </xml> """.strip()) [msg] = yield self.tx_helper.wait_for_dispatched_inbound(1) yield self.tx_helper.make_dispatch_reply(msg, 'foo') self.assertEqual( (yield transport.get_addr_mask('fromUser')), transport.DEFAULT_MASK) self.assertEqual(msg['to_addr'], 'toUser@default') yield resp_d @inlineCallbacks def test_mask_switching_on_event_key(self): transport = yield self.get_transport_with_access_token('foo') resp = yield request( transport, 'POST', data=""" <xml> <ToUserName><![CDATA[toUser]]></ToUserName> <FromUserName><![CDATA[fromUser]]></FromUserName> <CreateTime>123456789</CreateTime> <MsgType><![CDATA[event]]></MsgType> <Event><![CDATA[CLICK]]></Event> <EventKey><![CDATA[EVENTKEY]]></EventKey> </xml> """.strip()) self.assertEqual(resp.code, http.OK) [msg] = yield self.tx_helper.wait_for_dispatched_inbound(1) self.assertEqual( msg['session_event'], TransportUserMessage.SESSION_NEW) self.assertEqual( (yield transport.get_addr_mask('fromUser')), 'EVENTKEY') self.assertEqual(msg['to_addr'], 'toUser@EVENTKEY') @inlineCallbacks def test_mask_caching_on_text_message(self): transport = yield self.get_transport_with_access_token('foo') yield transport.cache_addr_mask('fromUser', 'foo') resp_d = request( transport, 'POST', data=""" <xml> <ToUserName><![CDATA[toUser]]></ToUserName> <FromUserName><![CDATA[fromUser]]></FromUserName> <CreateTime>1348831860</CreateTime> <MsgType><![CDATA[text]]></MsgType> <Content><![CDATA[this is a test]]></Content> <MsgId>1234567890123456</MsgId> </xml> """.strip()) [msg] = yield self.tx_helper.wait_for_dispatched_inbound(1) yield self.tx_helper.make_dispatch_reply(msg, 'foo') 
self.assertEqual(msg['to_addr'], 'toUser@foo') yield resp_d @inlineCallbacks def test_mask_clearing_on_session_end(self): transport = yield self.get_transport_with_access_token('foo') yield transport.cache_addr_mask('fromUser', 'foo') resp_d = request( transport, 'POST', data=""" <xml> <ToUserName><![CDATA[toUser]]></ToUserName> <FromUserName><![CDATA[fromUser]]></FromUserName> <CreateTime>1348831860</CreateTime> <MsgType><![CDATA[text]]></MsgType> <Content><![CDATA[this is a test]]></Content> <MsgId>1234567890123456</MsgId> </xml> """.strip()) [msg] = yield self.tx_helper.wait_for_dispatched_inbound(1) yield self.tx_helper.make_dispatch_reply( msg, 'foo', session_event=TransportUserMessage.SESSION_CLOSE) self.assertEqual(msg['to_addr'], 'toUser@foo') self.assertEqual( (yield transport.get_addr_mask('fromUser')), transport.DEFAULT_MASK) yield resp_d @inlineCallbacks def test_inbound_event_unsubscribe_message(self): transport = yield self.get_transport_with_access_token('foo') yield transport.cache_addr_mask('fromUser', 'foo') resp = yield request( transport, 'POST', data=""" <xml> <ToUserName> <![CDATA[toUser]]> </ToUserName> <FromUserName> <![CDATA[fromUser]]> </FromUserName> <CreateTime>1395130515</CreateTime> <MsgType> <![CDATA[event]]> </MsgType> <Event> <![CDATA[unsubscribe]]> </Event> <EventKey><![CDATA[]]></EventKey> </xml> """) self.assertEqual(resp.code, http.OK) self.assertEqual([], self.tx_helper.get_dispatched_inbound()) self.assertEqual( (yield transport.get_addr_mask('fromUser')), transport.DEFAULT_MASK) class TestWeChatMenuCreation(WeChatTestCase): MENU_TEMPLATE = """ button: - name: Daily Song type: click key: V1001_TODAY_MUSIC - name: ' Artist Profile' type: click key: V1001_TODAY_SINGER - name: Menu sub_button: - name: Search type: view url: 'http://www.soso.com/' - name: Video type: view url: 'http://v.qq.com/' - name: Like us type: click key: V1001_GOOD """ MENU = yaml.safe_load(MENU_TEMPLATE) @inlineCallbacks def test_create_new_menu_success(self): transport = yield self.get_transport_with_access_token('foo') d = transport.create_wechat_menu('foo', self.MENU) req = yield self.request_queue.get() self.assertEqual(req.path, '/menu/create') self.assertEqual(req.args, { 'access_token': ['foo'], }) self.assertEqual(json.load(req.content), self.MENU) req.write(json.dumps({'errcode': 0, 'errmsg': 'ok'})) req.finish() yield d @inlineCallbacks def test_create_new_menu_failure(self): transport = yield self.get_transport_with_access_token('foo') d = transport.create_wechat_menu('foo', self.MENU) req = yield self.request_queue.get() req.write(json.dumps({ 'errcode': 40018, 'errmsg': 'invalid button name size', })) req.finish() exception = yield self.assertFailure(d, WeChatApiException) self.assertEqual( exception.message, ('Received errcode: 40018, errmsg: invalid button name ' 'size when creating WeChat Menu.')) class TestWeChatInferMessage(WeChatTestCase): @inlineCallbacks def test_infer_news_message(self): transport = yield self.get_transport_with_access_token('foo') resp_d = request( transport, 'POST', data=""" <xml> <ToUserName><![CDATA[toUser]]></ToUserName> <FromUserName><![CDATA[fromUser]]></FromUserName> <CreateTime>1348831860</CreateTime> <MsgType><![CDATA[text]]></MsgType> <Content><![CDATA[this is a test]]></Content> <MsgId>10234567890123456</MsgId> </xml> """.strip()) [msg] = yield self.tx_helper.wait_for_dispatched_inbound(1) yield self.tx_helper.make_dispatch_reply( msg, ('To continue you need to accept the T&Cs available at ' 'http://tandcurl.com/ . 
Have you read and do you accept ' 'the terms and conditions?\n1. Yes\n2. No')) resp = yield resp_d self.assertTrue( '<Url>http://tandcurl.com/</Url>' in resp.delivered_body) self.assertTrue( '<Title>To continue you need to accept the T&amp;Cs available ' 'at </Title>' in resp.delivered_body) self.assertTrue( '<Description>To continue you need to accept the T&amp;Cs ' 'available at http://tandcurl.com/ . Have you read and do ' 'you accept the terms and conditions?\n1. Yes\n2. No' '</Description>' in resp.delivered_body) class TestWeChatEmbedUserProfile(WeChatTestCase): @inlineCallbacks def test_embed_user_profile(self): # NOTE: From http://admin.wechat.com/wiki/index.php?title=User_Profile user_profile = { "subscribe": 1, "openid": "fromUser", "nickname": "Band", "sex": 1, "language": "zh_CN", "city": "Guangzhou", "province": "Guangdong", "country": "China", "headimgurl": ( "http://wx.qlogo.cn/mmopen/g3MonUZtNHkdmzicIlibx6iaFqAc56v" "xLSUfpb6n5WKSYVY0ChQKkiaJSgQ1dZuTOgvLLrhJbERQQ4eMsv84eavH" "iaiceqxibJxCfHe/0"), "subscribe_time": 1382694957 } transport = yield self.get_transport_with_access_token( 'foo', embed_user_profile=True) resp_d = request( transport, 'POST', data=""" <xml> <ToUserName><![CDATA[toUser]]></ToUserName> <FromUserName><![CDATA[fromUser]]></FromUserName> <CreateTime>1348831860</CreateTime> <MsgType><![CDATA[text]]></MsgType> <Content><![CDATA[this is a test]]></Content> <MsgId>10234567890123456</MsgId> </xml> """.strip()) req = yield self.request_queue.get() self.assertEqual(req.args, { 'access_token': ['foo'], 'lang': ['en'], 'openid': ['fromUser'], }) req.write(json.dumps(user_profile)) req.finish() [msg] = yield self.tx_helper.wait_for_dispatched_inbound(1) yield self.tx_helper.make_dispatch_reply(msg, 'Bye!') self.assertEqual( msg['transport_metadata']['wechat']['UserProfile'], user_profile) up_key = transport.user_profile_key('fromUser') cached_up = yield transport.redis.get(up_key) config = transport.get_static_config() self.assertEqual(json.loads(cached_up), user_profile) self.assertTrue(0 < (yield transport.redis.ttl(up_key)) <= config.embed_user_profile_lifetime) yield resp_d class TestWeChatInsanity(WeChatTestCase): @inlineCallbacks def test_double_delivery_handling(self): transport = yield self.get_transport_with_access_token('foo') xml = """ <xml> <ToUserName><![CDATA[toUser]]></ToUserName> <FromUserName><![CDATA[fromUser]]></FromUserName> <CreateTime>1348831860</CreateTime> <MsgType><![CDATA[text]]></MsgType> <Content><![CDATA[this is a test]]></Content> <MsgId>1234567890123456</MsgId> </xml> """.strip() resp1_d = request(transport, 'POST', data=xml) [msg] = yield self.tx_helper.wait_for_dispatched_inbound(1) reply_msg = yield self.tx_helper.make_dispatch_reply( msg, 'foo') resp1 = yield resp1_d reply1 = WeChatXMLParser.parse(resp1.delivered_body) self.assertTrue(isinstance(reply1, TextMessage)) # this one should bounce straight away resp2 = yield request(transport, 'POST', data=xml) self.assertEqual(resp2.code, http.OK) reply2 = WeChatXMLParser.parse(resp2.delivered_body) self.assertEqual(reply1.to_xml(), reply2.to_xml()) # Nothing new was added self.assertEqual(1, len(self.tx_helper.get_dispatched_inbound())) @inlineCallbacks def test_close_double_delivery_handling(self): transport = yield self.get_transport_with_access_token('foo') xml = """ <xml> <ToUserName><![CDATA[toUser]]></ToUserName> <FromUserName><![CDATA[fromUser]]></FromUserName> <CreateTime>1348831860</CreateTime> <MsgType><![CDATA[text]]></MsgType> <Content><![CDATA[%s]]></Content> 
<MsgId>1234567890123456</MsgId> </xml> """.strip() resp1_d = request(transport, 'POST', data=xml % ('first',)) resp2_d = task.deferLater(reactor, 0.1, request, transport, 'POST', data=xml % ('second',)) [msg] = yield self.tx_helper.wait_for_dispatched_inbound(1) # the second request should return first resp2 = yield resp2_d self.assertEqual(resp2.code, http.OK) self.assertEqual(resp2.delivered_body, '') reply_msg = yield self.tx_helper.make_dispatch_reply( msg, 'foo') resp1 = yield resp1_d reply1 = WeChatXMLParser.parse(resp1.delivered_body) self.assertTrue(isinstance(reply1, TextMessage)) @inlineCallbacks def test_locking(self): transport1 = yield self.get_transport_with_access_token('foo') transport2 = yield self.get_transport_with_access_token('foo') transport3 = yield self.get_transport_with_access_token('foo') if any([isinstance(tx.redis._client, FakeRedis) for tx in [transport1, transport2, transport3]]): raise SkipTest( 'FakeRedis setnx is not atomic. ' 'See https://github.com/praekelt/vumi/issues/789') locks = yield gatherResults([ transport1.mark_as_seen_recently('msg-id'), transport2.mark_as_seen_recently('msg-id'), transport3.mark_as_seen_recently('msg-id'), ]) self.assertEqual(sorted(locks), [0, 0, 1])
TouK/vumi
vumi/transports/wechat/tests/test_wechat.py
Python
bsd-3-clause
27,369
[ "VisIt" ]
e206b108ea8e5d20957bfd6231a39e79751c80c643321eb9b2900e487d9a6b5d
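The inbound fixtures in the tests above are plain WeChat push XML. A minimal sketch, using only the standard library (not the transport's own WeChatXMLParser), of pulling the fields out of such a payload; the element names come from the test fixtures:

import xml.etree.ElementTree as ET

# Illustrative only: ToUserName, FromUserName, MsgType, Content and MsgId are the
# element names used in the test payloads above; CDATA sections are resolved to
# plain text by the parser.
payload = """
<xml>
    <ToUserName><![CDATA[toUser]]></ToUserName>
    <FromUserName><![CDATA[fromUser]]></FromUserName>
    <CreateTime>1348831860</CreateTime>
    <MsgType><![CDATA[text]]></MsgType>
    <Content><![CDATA[this is a test]]></Content>
    <MsgId>1234567890123456</MsgId>
</xml>
""".strip()

root = ET.fromstring(payload)
msg = {child.tag: (child.text or '').strip() for child in root}
assert msg['MsgType'] == 'text'
assert msg['FromUserName'] == 'fromUser'
print(msg['Content'])  # -> this is a test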
from ovito import *

o1 = ObjectNode()
print(str(o1))
print(repr(o1))
assert(o1 == o1)

o2 = ObjectNode()
assert(o1 != o2)

assert(ovito.dataset.anim == ovito.dataset.anim)
srinath-chakravarthy/ovito
tests/scripts/test_suite/ovito_object.py
Python
gpl-3.0
170
[ "OVITO" ]
f08245aa7fa03337199b0470cd3bdd64dba47a948fc3b7a12e8371d13a8a97ef
from django.test import TestCase from regressiontests.select_related_regress.models import * class SelectRelatedRegressTests(TestCase): def test_regression_7110(self): """ Regression test for bug #7110. When using select_related(), we must query the Device and Building tables using two different aliases (each) in order to differentiate the start and end Connection fields. The net result is that both the "connections = ..." queries here should give the same results without pulling in more than the absolute minimum number of tables (history has shown that it's easy to make a mistake in the implementation and include some unnecessary bonus joins). """ b=Building.objects.create(name='101') dev1=Device.objects.create(name="router", building=b) dev2=Device.objects.create(name="switch", building=b) dev3=Device.objects.create(name="server", building=b) port1=Port.objects.create(port_number='4',device=dev1) port2=Port.objects.create(port_number='7',device=dev2) port3=Port.objects.create(port_number='1',device=dev3) c1=Connection.objects.create(start=port1, end=port2) c2=Connection.objects.create(start=port2, end=port3) connections=Connection.objects.filter(start__device__building=b, end__device__building=b).order_by('id') self.assertEqual([(c.id, unicode(c.start), unicode(c.end)) for c in connections], [(c1.id, u'router/4', u'switch/7'), (c2.id, u'switch/7', u'server/1')]) connections=Connection.objects.filter(start__device__building=b, end__device__building=b).select_related().order_by('id') self.assertEqual([(c.id, unicode(c.start), unicode(c.end)) for c in connections], [(c1.id, u'router/4', u'switch/7'), (c2.id, u'switch/7', u'server/1')]) # This final query should only join seven tables (port, device and building # twice each, plus connection once). self.assertEqual(connections.query.count_active_tables(), 7) def test_regression_8106(self): """ Regression test for bug #8106. Same sort of problem as the previous test, but this time there are more extra tables to pull in as part of the select_related() and some of them could potentially clash (so need to be kept separate). """ us = TUser.objects.create(name="std") usp = Person.objects.create(user=us) uo = TUser.objects.create(name="org") uop = Person.objects.create(user=uo) s = Student.objects.create(person = usp) o = Organizer.objects.create(person = uop) c = Class.objects.create(org=o) e = Enrollment.objects.create(std=s, cls=c) e_related = Enrollment.objects.all().select_related()[0] self.assertEqual(e_related.std.person.user.name, u"std") self.assertEqual(e_related.cls.org.person.user.name, u"org") def test_regression_8036(self): """ Regression test for bug #8036 the first related model in the tests below ("state") is empty and we try to select the more remotely related state__country. The regression here was not skipping the empty column results for country before getting status. 
""" australia = Country.objects.create(name='Australia') active = ClientStatus.objects.create(name='active') client = Client.objects.create(name='client', status=active) self.assertEqual(client.status, active) self.assertEqual(Client.objects.select_related()[0].status, active) self.assertEqual(Client.objects.select_related('state')[0].status, active) self.assertEqual(Client.objects.select_related('state', 'status')[0].status, active) self.assertEqual(Client.objects.select_related('state__country')[0].status, active) self.assertEqual(Client.objects.select_related('state__country', 'status')[0].status, active) self.assertEqual(Client.objects.select_related('status')[0].status, active) def test_multi_table_inheritance(self): """ Exercising select_related() with multi-table model inheritance. """ c1 = Child.objects.create(name="child1", value=42) i1 = Item.objects.create(name="item1", child=c1) i2 = Item.objects.create(name="item2") self.assertQuerysetEqual( Item.objects.select_related("child").order_by("name"), ["<Item: item1>", "<Item: item2>"] ) def test_regression_12851(self): """ Regression for #12851 Deferred fields are used correctly if you select_related a subset of fields. """ australia = Country.objects.create(name='Australia') active = ClientStatus.objects.create(name='active') wa = State.objects.create(name="Western Australia", country=australia) c1 = Client.objects.create(name='Brian Burke', state=wa, status=active) burke = Client.objects.select_related('state').defer('state__name').get(name='Brian Burke') self.assertEqual(burke.name, u'Brian Burke') self.assertEqual(burke.state.name, u'Western Australia') # Still works if we're dealing with an inherited class sc1 = SpecialClient.objects.create(name='Troy Buswell', state=wa, status=active, value=42) troy = SpecialClient.objects.select_related('state').defer('state__name').get(name='Troy Buswell') self.assertEqual(troy.name, u'Troy Buswell') self.assertEqual(troy.value, 42) self.assertEqual(troy.state.name, u'Western Australia') # Still works if we defer an attribute on the inherited class troy = SpecialClient.objects.select_related('state').defer('value', 'state__name').get(name='Troy Buswell') self.assertEqual(troy.name, u'Troy Buswell') self.assertEqual(troy.value, 42) self.assertEqual(troy.state.name, u'Western Australia') # Also works if you use only, rather than defer troy = SpecialClient.objects.select_related('state').only('name').get(name='Troy Buswell') self.assertEqual(troy.name, u'Troy Buswell') self.assertEqual(troy.value, 42) self.assertEqual(troy.state.name, u'Western Australia')
mzdaniel/oh-mainline
vendor/packages/Django/tests/regressiontests/select_related_regress/tests.py
Python
agpl-3.0
6,336
[ "Brian" ]
f1db7a40626f6c9a352cec20ce8b2a9d3d8f8724c2b145d1d5bede72465fef43
""" Test_RSS_Policy_Configurations """ import unittest import DIRAC.ResourceStatusSystem.Policy.Configurations as moduleTested __RCSID__ = '$Id: $' ################################################################################ class Configurations_TestCase( unittest.TestCase ): def setUp( self ): """ Setup """ self.moduleTested = moduleTested def tearDown( self ): """ TearDown """ del self.moduleTested ################################################################################ # Tests class Configurations_Success( Configurations_TestCase ): def test_policiesMeta( self ): """ tests that the configuration does not have any funny key """ self.assertEqual( True, hasattr( self.moduleTested, 'POLICIESMETA' ) ) policiesMeta = self.moduleTested.POLICIESMETA for _policyName, policyMeta in policiesMeta.items(): self.assertEqual( [ 'args', 'command', 'description', 'module' ], policyMeta.keys() ) ################################################################################ ################################################################################ if __name__ == '__main__': suite = unittest.defaultTestLoader.loadTestsFromTestCase( Configurations_TestCase ) suite.addTest( unittest.defaultTestLoader.loadTestsFromTestCase( Configurations_Success ) ) testResult = unittest.TextTestRunner( verbosity = 2 ).run( suite ) #EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
vmendez/DIRAC
ResourceStatusSystem/Policy/test/Test_RSS_Policy_Configurations.py
Python
gpl-3.0
1,548
[ "DIRAC" ]
926dc0ef254596a09d53a3feea36287ad67953b155993b77681b381df6e0501a
from system.core.controller import *
import random
import datetime
from time import strftime


class Ninja(Controller):
    def __init__(self, action):
        super(Ninja, self).__init__(action)

    def index(self):
        try:
            session['gold']
        except:
            session['gold'] = 0
        try:
            session['activities']
        except:
            session['activities'] = []
        return self.load_view('ninja.html')

    def clear(self):
        session.clear()
        return redirect('/')

    def process(self):
        action = request.form["action"]
        randomNumber = random.random()
        print randomNumber
        if action == "farm":
            earn = int(randomNumber * 10) + 10
        elif action == "cave":
            earn = int(randomNumber * 5) + 5
        elif action == "house":
            earn = int(randomNumber * 3) + 3
        elif action == "casino":
            earn = int(randomNumber * 100) - 50
        session['gold'] += earn
        timeNow = datetime.datetime.now().strftime('%Y/%m/%d')
        if earn >= 0:
            newAction = {'status': 'earn', 'action': "Earned {} gold from {} ({})".format(earn, action, timeNow)}
        else:
            # Show the timestamp, matching the "earn" branch (the original passed an
            # extra, unused format argument so the date never appeared).
            newAction = {'status': 'lost', 'action': "Entered Casino and lost {} gold ({})".format(-earn, timeNow)}
        print newAction
        session["activities"].append(newAction)
        print session["activities"]
        return redirect('/')
authman/Python201609
pairol_alex/Assignments/pylotNinja/controllers/Ninja.py
Python
mit
1,500
[ "CASINO" ]
813b37436291951e87464f995f304b11ea782ed70c463e7788468ee8717bbfdd
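The process() action above maps each location to a fixed payout range, since random.random() lies in [0, 1). A small illustrative sketch (the helper name and table are not part of the controller) tabulating those ranges:

# int(r * k) is 0..k-1 for r in [0, 1), so each action has a fixed span.
def payout_range(action):
    ranges = {
        'farm':   (10, 19),   # int(r * 10) + 10
        'cave':   (5, 9),     # int(r * 5) + 5
        'house':  (3, 5),     # int(r * 3) + 3
        'casino': (-50, 49),  # int(r * 100) - 50
    }
    return ranges[action]

for action in ('farm', 'cave', 'house', 'casino'):
    low, high = payout_range(action)
    print('{}: {} to {} gold'.format(action, low, high))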
import numpy import random import time from scipy.special import gammaln # TODO: remove everything about multinomials in here. Pass in a distribution # object with functions for sampling etc... class multinomial(): """ Class for keeping track of the parameters of a single multinomial. """ def __init__(self, number_features, smooth=.0000001): # the probabilities self.phi = numpy.zeros([number_features]) # counts self.n = 0 self.smooth = smooth def posterior(self, instance): """ args: instance - an observation of the PMF """ ps = (self.phi + self.smooth) / numpy.sum(self.phi + self.smooth) p = gammaln(numpy.sum(instance) + 1) p -= numpy.sum(gammaln(instance + 1)) p += numpy.sum(instance * numpy.log(ps)) return p def rem_i(self, instance): self.n -= 1 self.phi -= instance def add_i(self, instance): self.n += 1 self.phi += instance class gaussian(): def __init__(self, number_features, smooth=.0000001): """ args: number_features - number of features to expect for initialization """ self.data = [] self.sigma = numpy.zeros([number_features, number_features]) self.mu = numpy.zeros(number_features) # the number of instances in this component self.n = 0 self.smooth = smooth def update_params(self): if len(self.data) == 1: self.sigma = numpy.array([self.n ** (-1. / (len(self.mu) + 4))]) elif len(self.data) > 1: self.sigma = numpy.cov(numpy.array(self.data).T) self.mu = numpy.array(self.data).sum(axis=0) / float(len(self.data)) else: self.mu = [0] * len(self.mu) def posterior(self, instance): """ args: instance - an observation of the PMF returns: the posterior probability """ if len(self.sigma) == 1: ll = - .5 * numpy.log(self.sigma[0] ** len(self.mu)) dotpart = .5 * (instance - self.mu) * (1 / self.sigma[0]) ll -= numpy.dot(dotpart, (instance - self.mu)) ll -= self.n / 2 * numpy.log(numpy.pi) return ll determinant = numpy.linalg.det(self.sigma) if determinant <= 0: # use kde total_ll = 0. bandwidth = self.n ** (-1. / (len(self.mu) + 4)) for kernel in self.data: ll = - .5 * numpy.log(bandwidth ** len(self.mu)) dotpart = .5 * (instance - self.mu) * (1 / bandwidth) ll -= numpy.dot(dotpart, (instance - self.mu)) ll -= self.n / 2 * numpy.log(numpy.pi) total_ll += ll return total_ll / self.n # determinant is positive and non-negative; do normal pdf norm_coeff = (len(self.sigma) * numpy.log(2 * numpy.pi) + numpy.log(determinant)) error = instance - self.mu numerator = numpy.linalg.solve(self.sigma, error).T.dot(error) return - 0.5 * (norm_coeff + numerator) def rem_i(self, instance): self.n -= 1 # remove the instance from the data, recalculate sigma if tuple(instance) in self.data: self.data.pop(self.data.index(tuple(instance))) self.update_params() def add_i(self, instance): self.n += 1 self.data.append(tuple(instance)) self.update_params() class dpmm: """ Dirichlet process mixture model with gibbs sampling approach to sampling for non-conjugate distributions. See alg. 8 in Neal 2000. 
""" def __init__(self, data, alpha, smooth, m, base_distribution=multinomial, debug=False): """ args: data - a list of arrays or a 2d array alpha - parameter to the dirichlet smooth - float for laplace smoothing for LL calculation m - number of auxiliary parameters """ self.debug = debug self.base_distribution = base_distribution self.btime = 0 # the number of features self.nf = len(data[0]) self.alpha = float(alpha) self.N = len(data) self.smooth = float(smooth) self.data = data self.c = numpy.array([None] * self.N) # component membership for each instance self.components = [] # for i in range(self.N): # self.add_i(numpy.random.randint(0, n_init_components), i) self.new_components = [] # initialize # the number of samples averaged to approximate the integral over F * # dG(phi) self.m = m self.add_i(0, 0) self.add_i(0, 1) def fit(self, iterations): """ The gibbs sampling loop. """ output = [] if self.debug: print "starting" for n in xrange(iterations): # sample new once per iteration self.new_components = [] for i in xrange(self.m): self.add_rand_phi() for i in xrange(self.N): self.sample_c(i) ll = self.loglikelihood() output.append( [str(n), str([c.n for c in self.components]), str(ll)]) if self.debug: print "iteration ", n, [c.n for c in self.components], ll return output def sample_c(self, i): """ Sample component for instance i. """ self.rem_i(i) p = self.pc_x(i) new_c = numpy.random.multinomial(1, p).argmax() self.add_i(new_c, i) def loglikelihood(self): """ Calculate the LL of the current state """ # the coefficients co = [] ll = 0 for ci in range(len(self.components)): co.append(self.components[ci].n / (self.N - 1 + self.alpha)) # co.append(1.) # model fit for i in range(len(self.data)): c = self.c[i] p = self.components[c].posterior(self.data[i]) if self.debug: if numpy.isnan(p): print "NAN", self.components[c].__dict__, ll, p import sys sys.exit() ll += p ll += numpy.sum(co) return ll def pc_x(self, ind): """ Calculate the probability of each component given the instance at ind args: ind - index in self.data """ elapsed = time.time() p = [] new_co = (numpy.log(self.alpha) - numpy.log(self.m) - numpy.log((self.N - 1 + self.alpha))) for c in range(len(self.components)): co = (numpy.log(self.components[c].n) - numpy.log((self.N - 1 + self.alpha))) lp = self.components[c].posterior(self.data[ind]) p.append(lp + co) for c in range(self.m): lp = self.new_components[c].posterior(self.data[ind]) p.append(lp + new_co) # normalize p p -= numpy.max(p) p = numpy.exp(p) p /= numpy.sum(p) if self.btime: print "pc_x", time.time() - elapsed return p def add_rand_phi(self, size=10, random_size=True): """ Take a random sample of instances and create a component from them. args: size - the number of instances to use to create the component. If random then the maximum size. random_size - select a random number of instances in [1, size] to create a component. 
""" elapsed = time.time() if random_size: size = random.randint(1, size) inds = numpy.random.permutation(range(self.N))[:size] self.new_components.append(self.base_distribution(self.nf)) for instance in self.data[inds]: self.new_components[-1].add_i(instance) if self.btime: print "add_rand_phi", time.time() - elapsed def rem_i(self, i): elapsed = time.time() c = self.c[i] if c is None: return None if self.components[c].n == 1: # if this was not the last listed component, change indices in if c < len(self.components) - 1: ind = numpy.where(self.c > c) self.c[ind] = self.c[ind] - 1 self.components.pop(c) else: self.components[c].rem_i(self.data[i]) if self.btime: print "rem_i", time.time() - elapsed def add_i(self, c, i): elapsed = time.time() # new component if c >= len(self.components): self.c[i] = len(self.components) self.components.append(self.base_distribution(self.nf)) self.components[self.c[i]].add_i(self.data[i]) else: self.c[i] = c self.components[c].add_i(self.data[i]) if self.btime: print "add_i", time.time() - elapsed def get_labels(self, data, random_assign=False): """ Get the component/cluster labels of the given data. The case where you don't randomly assign labels based on true model probablilities (including dirichlet prior) is akin to a step of EM. args: data - a numpy array of int random_assign - whether to take the most likely component or sample returns: list of ints """ labels = [] for i, instance in enumerate(data): ps = numpy.zeros(len(self.components)) for c, component in enumerate(self.components): ps[c] = component.posterior(instance) ps /= ps.sum() new_c = ps.argmax() if random_assign: new_c = numpy.random.multinomial(1, ps).argmax() labels.append(new_c) return labels def update_data(self, sample, random_assign=True): """ Update the data and assign new data to existing components. For use in bagging. args: sample - a data set numpy array of int random_assign - assign points to a randomly selected component based on their likelihood """ self.data = sample self.N = len(self.data) for i, c in enumerate(self.get_labels(sample, random_assign)): self.add_i(c, i) # remove components that have no members c = 0 while c < len(self.components): if len(numpy.where(self.c == c)[0]) == 0: ind = numpy.where(self.c > c)[0] self.c[ind] = self.c[ind] - 1 self.components.pop(c) else: c += 1 # recalculate all components for c in range(len(self.components)): component_instances = sample[numpy.where(self.c == c)] self.components[c].phi = numpy.zeros([self.nf]) self.components[c].n = 0 for ci in component_instances: self.components[c].add_i(ci) if self.debug: print "INITIALIZED", [c.n for c in self.components] def fit_bagging(self, all_data, iterations, bag_size=.1, n_bags=10, random_assign=True): """ Execute gibbs sampling on bagged samples of the data. Instances are assigned to the most likely existing (or sampled) component between bags, thus ensuring a continuation of the same model. 
args: iterations - number of iterations bag_size - fraction of data set size if in (0, 1), else just the number of instances n_bags - how many iterations of bagging random_assign - whether to use the most likely or a sampled component for assigning the new samples to the existing model """ b_size = bag_size if 0 < bag_size < 1: b_size = int(len(all_data) * bag_size) for s in range(n_bags): sample = all_data[numpy.random.permutation( range(len(all_data)))[:b_size]].astype(int) self.update_data(sample, random_assign) self.fit(iterations) if __name__ == "__main__": print "TEST RANDOM SIMPLE MULTINOMIALS" data = [] n_draws = 5 for i in range(50): data.append(numpy.random.multinomial( n_draws, [.5, .2, .1, .05, .03, .02], size=1)[0]) for i in range(50): data.append(numpy.random.multinomial( n_draws, [.02, .03, .05, .1, .2, .5], size=1)[0]) # for i in range(50): # data.append(numpy.random.multinomial( # n_draws, [.1, .1, .3, .3, .1, .1], size=1)[0]) data = numpy.array(data) model = dpmm(data, .1, .000000001, 1, 5) pre_fit = model.get_labels(data) import time start = time.time() model.fit(50) # model.fit_bagging(data, 50, .5, 10) print "elapsed", time.time() - start print "pre", pre_fit print "post", model.get_labels(data) # print "TEST BAGGING" # model.fit_bagging(data, 50, .5, 10) # print "elapsed", time.time() - start # # print "pre", pre_fit # print "post", model.get_labels(data)
arider/riderml
riderml/mixture_model/dpmm.py
Python
mit
13,519
[ "Gaussian" ]
e532d51dcdd8ab2c68dd663c99831df8f5161e8648e699017e914207e2aa1bcb
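multinomial.posterior() above evaluates the multinomial log-pmf term by term with gammaln. A short sketch (assuming SciPy >= 0.19, which provides scipy.stats.multinomial; the sample counts and probabilities are arbitrary) checking that formula against SciPy's reference implementation, with the smoothing term left out by passing exact probabilities:

import numpy
from scipy.special import gammaln
from scipy.stats import multinomial as sp_multinomial

probs = numpy.array([0.5, 0.2, 0.1, 0.05, 0.03, 0.12])   # sums to 1
counts = numpy.array([3, 1, 0, 0, 0, 1])

# Same expression as posterior(): log n! - sum(log x_i!) + sum(x_i * log p_i)
lp = (gammaln(counts.sum() + 1)
      - gammaln(counts + 1).sum()
      + (counts * numpy.log(probs)).sum())

ref = sp_multinomial.logpmf(counts, n=counts.sum(), p=probs)
assert numpy.isclose(lp, ref)
print(lp, ref)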
# Compiled by Charles Harris # Taken from his email message to scipy-dev # dated October 3, 2002 # updated to 2002 values by BasSw, 2006 """ Fundamental Physical Constants These constants are taken from CODATA Recommended Values of the Fundamental Physical Constants: 2002. They may be found at physics.nist.gov/constants. The values are stored in the dictionary physical_constants as a tuple containing the value, the units, and the relative precision, in that order. All constants are in SI units unless otherwise stated. Several helper functions are provided: value(key) returns the value of the physical constant. unit(key) returns the units of the physical constant. precision(key) returns the relative precision of the physical constant. find(sub) prints out a list of keys containing the string sub. """ from __future__ import print_function import string from math import pi, sqrt __all__ = ['physical_constants', 'value', 'unit', 'precision', 'find'] """ From: http://physics.nist.gov/constants Source: Peter J. Mohr and Barry N. Taylor, CODATA Recommended Values of the Fundamental Physical Constants: 2002, published in Rev. Mod. Phys. vol. 77(1) 1-107 (2005). Quantity Value Uncertainty Unit """ txt = """speed of light in vacuum 299 792 458 0 m s^-1 magn. constant 12.566 370 614...e-7 0 N A^-2 electric constant 8.854 187 817...e-12 0 F m^-1 characteristic impedance of vacuum 376.730 313 461... 0 ohm Newtonian constant of gravitation 6.6742e-11 0.0010e-11 m^3 kg^-1 s^-2 Newtonian constant of gravitation over h-bar c 6.7087e-39 0.0010e-39 (GeV/c^2)^-2 Planck constant 6.626 0693e-34 0.000 0011e-34 J s Planck constant in eV s 4.135 667 43e-15 0.000 000 35e-15 eV s Planck constant over 2 pi times c in MeV fm 197.326 968 0.000 017 MeV fm Planck constant over 2 pi 1.054 571 68e-34 0.000 000 18e-34 J s Planck constant over 2 pi in eV s 6.582 119 15e-16 0.000 000 56e-16 eV s Planck mass 2.176 45e-8 0.000 16e-8 kg Planck temperature 1.416 79e32 0.000 11e32 K Planck length 1.616 24e-35 0.000 12e-35 m Planck time 5.391 21e-44 0.000 40e-44 s elementary charge 1.602 176 53e-19 0.000 000 14e-19 C elementary charge over h 2.417 989 40e14 0.000 000 21e14 A J^-1 magn. 
flux quantum 2.067 833 72e-15 0.000 000 18e-15 Wb conductance quantum 7.748 091 733e-5 0.000 000 026e-5 S inverse of conductance quantum 12 906.403 725 0.000 043 ohm Josephson constant 483 597.879e9 0.041e9 Hz V^-1 von Klitzing constant 25 812.807 449 0.000 086 ohm Bohr magneton 927.400 949e-26 0.000 080e-26 J T^-1 Bohr magneton in eV/T 5.788 381 804e-5 0.000 000 039e-5 eV T^-1 Bohr magneton in Hz/T 13.996 2458e9 0.000 0012e9 Hz T^-1 Bohr magneton in inverse meters per tesla 46.686 4507 0.000 0040 m^-1 T^-1 Bohr magneton in K/T 0.671 7131 0.000 0012 K T^-1 nuclear magneton 5.050 783 43e-27 0.000 000 43e-27 J T^-1 nuclear magneton in eV/T 3.152 451 259e-8 0.000 000 021e-8 eV T^-1 nuclear magneton in MHz/T 7.622 593 71 0.000 000 65 MHz T^-1 nuclear magneton in inverse meters per tesla 2.542 623 58e-2 0.000 000 22e-2 m^-1 T^-1 nuclear magneton in K/T 3.658 2637e-4 0.000 0064e-4 K T^-1 fine-structure constant 7.297 352 568e-3 0.000 000 024e-3 inverse fine-structure constant 137.035 999 11 0.000 000 46 Rydberg constant 10 973 731.568 525 0.000 073 m^-1 Rydberg constant times c in Hz 3.289 841 960 360e15 0.000 000 000 022e15 Hz Rydberg constant times hc in J 2.179 872 09e-18 0.000 000 37e-18 J Rydberg constant times hc in eV 13.605 6923 0.000 0012 eV Bohr radius 0.529 177 2108e-10 0.000 000 0018e-10 m Hartree energy 4.359 744 17e-18 0.000 000 75e-18 J Hartree energy in eV 27.211 3845 0.000 0023 eV quantum of circulation 3.636 947 550e-4 0.000 000 024e-4 m^2 s^-1 quantum of circulation times 2 7.273 895 101e-4 0.000 000 048e-4 m^2 s^-1 Fermi coupling constant 1.166 39e-5 0.000 01e-5 GeV^-2 weak mixing angle 0.222 15 0.000 76 electron mass 9.109 3826e-31 0.000 0016e-31 kg electron mass in u 5.485 799 0945e-4 0.000 000 0024e-4 u electron mass energy equivalent 8.187 1047e-14 0.000 0014e-14 J electron mass energy equivalent in MeV 0.510 998 918 0.000 000 044 MeV electron-muon mass ratio 4.836 331 67e-3 0.000 000 13e-3 electron-tau mass ratio 2.875 64e-4 0.000 47e-4 electron-proton mass ratio 5.446 170 2173e-4 0.000 000 0025e-4 electron-neutron mass ratio 5.438 673 4481e-4 0.000 000 0038e-4 electron-deuteron mass ratio 2.724 437 1095e-4 0.000 000 0013e-4 electron to alpha particle mass ratio 1.370 933 555 75e-4 0.000 000 000 61e-4 electron charge to mass quotient -1.758 820 12e11 0.000 000 15e11 C kg^-1 electron molar mass 5.485 799 0945e-7 0.000 000 0024e-7 kg mol^-1 Compton wavelength 2.426 310 238e-12 0.000 000 016e-12 m Compton wavelength over 2 pi 386.159 2678e-15 0.000 0026e-15 m classical electron radius 2.817 940 325e-15 0.000 000 028e-15 m Thomson cross section 0.665 245 873e-28 0.000 000 013e-28 m^2 electron magn. moment -928.476 412e-26 0.000 080e-26 J T^-1 electron magn. moment to Bohr magneton ratio -1.001 159 652 1859 0.000 000 000 0038 electron magn. moment to nuclear magneton ratio -1838.281 971 07 0.000 000 85 electron magn. moment anomaly 1.159 652 1859e-3 0.000 000 0038e-3 electron g factor -2.002 319 304 3718 0.000 000 000 0075 electron-muon magn. moment ratio 206.766 9894 0.000 0054 electron-proton magn. moment ratio -658.210 6862 0.000 0066 electron to shielded proton magn. moment ratio -658.227 5956 0.000 0071 electron-neutron magn. moment ratio 960.920 50 0.000 23 electron-deuteron magn. moment ratio -2143.923 493 0.000 023 electron to shielded helion magn. moment ratio 864.058 255 0.000 010 electron gyromagn. ratio 1.760 859 74e11 0.000 000 15e11 s^-1 T^-1 electron gyromagn. 
ratio over 2 pi 28 024.9532 0.0024 MHz T^-1 muon mass 1.883 531 40e-28 0.000 000 33e-28 kg muon mass in u 0.113 428 9264 0.000 000 0030 u muon mass energy equivalent 1.692 833 60e-11 0.000 000 29e-11 J muon mass energy equivalent in MeV 105.658 3692 0.000 0094 MeV muon-electron mass ratio 206.768 2838 0.000 0054 muon-tau mass ratio 5.945 92e-2 0.000 97e-2 muon-proton mass ratio 0.112 609 5269 0.000 000 0029 muon-neutron mass ratio 0.112 454 5175 0.000 000 0029 muon molar mass 0.113 428 9264e-3 0.000 000 0030e-3 kg mol^-1 muon Compton wavelength 11.734 441 05e-15 0.000 000 30e-15 m muon Compton wavelength over 2 pi 1.867 594 298e-15 0.000 000 047e-15 m muon magn. moment -4.490 447 99e-26 0.000 000 40e-26 J T^-1 muon magn. moment to Bohr magneton ratio -4.841 970 45e-3 0.000 000 13e-3 muon magn. moment to nuclear magneton ratio -8.890 596 98 0.000 000 23 muon magn. moment anomaly 1.165 919 81e-3 0.000 000 62e-3 muon g factor -2.002 331 8396 0.000 000 0012 muon-proton magn. moment ratio -3.183 345 118 0.000 000 089 tau mass 3.167 77e-27 0.000 52e-27 kg tau mass in u 1.907 68 0.000 31 u tau mass energy equivalent 2.847 05e-10 0.000 46e-10 J tau mass energy equivalent in MeV 1776.99 0.29 MeV tau-electron mass ratio 3477.48 0.57 tau-muon mass ratio 16.8183 0.0027 tau-proton mass ratio 1.893 90 0.000 31 tau-neutron mass ratio 1.891 29 0.000 31 tau molar mass 1.907 68e-3 0.000 31e-3 kg mol^-1 tau Compton wavelength 0.697 72e-15 0.000 11e-15 m tau Compton wavelength over 2 pi 0.111 046e-15 0.000 018e-15 m proton mass 1.672 621 71e-27 0.000 000 29e-27 kg proton mass in u 1.007 276 466 88 0.000 000 000 13 u proton mass energy equivalent 1.503 277 43e-10 0.000 000 26e-10 J proton mass energy equivalent in MeV 938.272 029 0.000 080 MeV proton-electron mass ratio 1836.152 672 61 0.000 000 85 proton-muon mass ratio 8.880 243 33 0.000 000 23 proton-tau mass ratio 0.528 012 0.000 086 proton-neutron mass ratio 0.998 623 478 72 0.000 000 000 58 proton charge to mass quotient 9.578 833 76e7 0.000 000 82e7 C kg^-1 proton molar mass 1.007 276 466 88e-3 0.000 000 000 13e-3 kg mol^-1 proton Compton wavelength 1.321 409 8555e-15 0.000 000 0088e-15 m proton Compton wavelength over 2 pi 0.210 308 9104e-15 0.000 000 0014e-15 m proton magn. moment 1.410 606 71e-26 0.000 000 12e-26 J T^-1 proton magn. moment to Bohr magneton ratio 1.521 032 206e-3 0.000 000 015e-3 proton magn. moment to nuclear magneton ratio 2.792 847 351 0.000 000 028 proton g factor 5.585 694 701 0.000 000 056 proton-neutron magn. moment ratio -1.459 898 05 0.000 000 34 shielded proton magn. moment 1.410 570 47e-26 0.000 000 12e-26 J T^-1 shielded proton magn. moment to Bohr magneton ratio 1.520 993 132e-3 0.000 000 016e-3 shielded proton magn. moment to nuclear magneton ratio 2.792 775 604 0.000 000 030 proton magn. shielding correction 25.689e-6 0.015e-6 proton gyromagn. ratio 2.675 222 05e8 0.000 000 23e8 s^-1 T^-1 proton gyromagn. ratio over 2 pi 42.577 4813 0.000 0037 MHz T^-1 shielded proton gyromagn. ratio 2.675 153 33e8 0.000 000 23e8 s^-1 T^-1 shielded proton gyromagn. 
ratio over 2 pi 42.576 3875 0.000 0037 MHz T^-1 proton rms charge radius 0.8750e-15 0.0068e-15 m neutron mass 1.674 927 28e-27 0.000 000 29e-27 kg neutron mass in u 1.008 664 915 60 0.000 000 000 55 u neutron mass energy equivalent 1.505 349 57e-10 0.000 000 26e-10 J neutron mass energy equivalent in MeV 939.565 360 0.000 081 MeV neutron-electron mass ratio 1838.683 6598 0.000 0013 neutron-muon mass ratio 8.892 484 02 0.000 000 23 neutron-tau mass ratio 0.528 740 0.000 086 neutron-proton mass ratio 1.001 378 418 70 0.000 000 000 58 neutron molar mass 1.008 664 915 60e-3 0.000 000 000 55e-3 kg mol^-1 neutron Compton wavelength 1.319 590 9067e-15 0.000 000 0088e-15 m neutron Compton wavelength over 2 pi 0.210 019 4157e-15 0.000 000 0014e-15 m neutron magn. moment -0.966 236 45e-26 0.000 000 24e-26 J T^-1 neutron magn. moment to Bohr magneton ratio -1.041 875 63e-3 0.000 000 25e-3 neutron magn. moment to nuclear magneton ratio -1.913 042 73 0.000 000 45 neutron g factor -3.826 085 46 0.000 000 90 neutron-electron magn. moment ratio 1.040 668 82e-3 0.000 000 25e-3 neutron-proton magn. moment ratio -0.684 979 34 0.000 000 16 neutron to shielded proton magn. moment ratio -0.684 996 94 0.000 000 16 neutron gyromagn. ratio 1.832 471 83e8 0.000 000 46e8 s^-1 T^-1 neutron gyromagn. ratio over 2 pi 29.164 6950 0.000 0073 MHz T^-1 deuteron mass 3.343 583 35e-27 0.000 000 57e-27 kg deuteron mass in u 2.013 553 212 70 0.000 000 000 35 u deuteron mass energy equivalent 3.005 062 85e-10 0.000 000 51e-10 J deuteron mass energy equivalent in MeV 1875.612 82 0.000 16 MeV deuteron-electron mass ratio 3670.482 9652 0.000 0018 deuteron-proton mass ratio 1.999 007 500 82 0.000 000 000 41 deuteron molar mass 2.013 553 212 70e-3 0.000 000 000 35e-3 kg mol^-1 deuteron magn. moment 0.433 073 482e-26 0.000 000 038e-26 J T^-1 deuteron magn. moment to Bohr magneton ratio 0.466 975 4567e-3 0.000 000 0050e-3 deuteron magn. moment to nuclear magneton ratio 0.857 438 2329 0.000 000 0092 deuteron-electron magn. moment ratio -4.664 345 548e-4 0.000 000 050e-4 deuteron-proton magn. moment ratio 0.307 012 2084 0.000 000 0045 deuteron-neutron magn. moment ratio -0.448 206 52 0.000 000 11 deuteron rms charge radius 2.1394e-15 0.0028e-15 m helion mass 5.006 412 14e-27 0.000 000 86e-27 kg helion mass in u 3.014 932 2434 0.000 000 0058 u helion mass energy equivalent 4.499 538 84e-10 0.000 000 77e-10 J helion mass energy equivalent in MeV 2808.391 42 0.000 24 MeV helion-electron mass ratio 5495.885 269 0.000 011 helion-proton mass ratio 2.993 152 6671 0.000 000 0058 helion molar mass 3.014 932 2434e-3 0.000 000 0058e-3 kg mol^-1 shielded helion magn. moment -1.074 553 024e-26 0.000 000 093e-26 J T^-1 shielded helion magn. moment to Bohr magneton ratio -1.158 671 474e-3 0.000 000 014e-3 shielded helion magn. moment to nuclear magneton ratio -2.127 497 723 0.000 000 025 shielded helion to proton magn. moment ratio -0.761 766 562 0.000 000 012 shielded helion to shielded proton magn. moment ratio -0.761 786 1313 0.000 000 0033 shielded helion gyromagn. ratio 2.037 894 70e8 0.000 000 18e8 s^-1 T^-1 shielded helion gyromagn. 
ratio over 2 pi 32.434 1015 0.000 0028 MHz T^-1 alpha particle mass 6.644 6565e-27 0.000 0011e-27 kg alpha particle mass in u 4.001 506 179 149 0.000 000 000 056 u alpha particle mass energy equivalent 5.971 9194e-10 0.000 0010e-10 J alpha particle mass energy equivalent in MeV 3727.379 17 0.000 32 MeV alpha particle-electron mass ratio 7294.299 5363 0.000 0032 alpha particle-proton mass ratio 3.972 599 689 07 0.000 000 000 52 alpha particle molar mass 4.001 506 179 149e-3 0.000 000 000 056e-3 kg mol^-1 Avogadro constant 6.022 1415e23 0.000 0010e23 mol^-1 atomic mass constant 1.660 538 86e-27 0.000 000 28e-27 kg atomic mass constant energy equivalent 1.492 417 90e-10 0.000 000 26e-10 J atomic mass constant energy equivalent in MeV 931.494 043 0.000 080 MeV Faraday constant 96 485.3383 0.0083 C mol^-1 Faraday constant for conventional electric current 96 485.336 0.016 C_90 mol^-1 molar Planck constant 3.990 312 716e-10 0.000 000 027e-10 J s mol^-1 molar Planck constant times c 0.119 626 565 72 0.000 000 000 80 J m mol^-1 molar gas constant 8.314 472 0.000 015 J mol^-1 K^-1 Boltzmann constant 1.380 6505e-23 0.000 0024e-23 J K^-1 Boltzmann constant in eV/K 8.617 343e-5 0.000 015e-5 eV K^-1 Boltzmann constant in Hz/K 2.083 6644e10 0.000 0036e10 Hz K^-1 Boltzmann constant in inverse meters per kelvin 69.503 56 0.000 12 m^-1 K^-1 molar volume of ideal gas (273.15 K, 101.325 kPa) 22.413 996e-3 0.000 039e-3 m^3 mol^-1 Loschmidt constant (273.15 K, 101.325 kPa) 2.686 7773e25 0.000 0047e25 m^-3 molar volume of ideal gas (273.15 K, 100 kPa) 22.710 981e-3 0.000 040e-3 m^3 mol^-1 Sackur-Tetrode constant (1 K, 100 kPa) -1.151 7047 0.000 0044 Sackur-Tetrode constant (1 K, 101.325 kPa) -1.164 8677 0.000 0044 Stefan-Boltzmann constant 5.670 400e-8 0.000 040e-8 W m^-2 K^-4 first radiation constant 3.741 771 38e-16 0.000 000 64e-16 W m^2 first radiation constant for spectral radiance 1.191 042 82e-16 0.000 000 20e-16 W m^2 sr^-1 second radiation constant 1.438 7752e-2 0.000 0025e-2 m K Wien displacement law constant 2.897 7685e-3 0.000 0051e-3 m K molar mass of carbon-12 12e-3 0 kg mol^-1 molar mass constant 1e-3 0 kg mol^-1 conventional value of Josephson constant 483 597.9e9 0 Hz V^-1 conventional value of von Klitzing constant 25 812.807 0 ohm standard atmosphere 101 325 0 Pa standard acceleration of gravity 9.806 65 0 m s^-2 Cu x unit 1.002 077 10e-13 0.000 000 29e-13 m Mo x unit 1.002 099 66e-13 0.000 000 53e-13 m Angstrom star 1.000 015 09e-10 0.000 000 90e-10 m lattice parameter of silicon 543.102 122e-12 0.000 020e-12 m {220} lattice spacing of silicon 192.015 5965e-12 0.000 0070e-12 m molar volume of silicon 12.058 8382e-6 0.000 0024e-6 m^3 mol^-1 electron volt 1.602 176 53e-19 0.000 000 14e-19 J unified atomic mass unit 1.660 538 86e-27 0.000 000 28e-27 kg natural unit of velocity 299 792 458 0 m s^-1 natural unit of action 1.054 571 68e-34 0.000 000 18e-34 J s natural unit of action in eV s 6.582 119 15e-16 0.000 000 56e-16 eV s natural unit of mass 9.109 3826e-31 0.000 0016e-31 kg natural unit of energy 8.187 1047e-14 0.000 0014e-14 J natural unit of energy in MeV 0.510 998 918 0.000 000 044 MeV natural unit of momentum 2.730 924 19e-22 0.000 000 47e-22 kg m s^-1 natural unit of momentum in MeV/c 0.510 998 918 0.000 000 044 MeV/c natural unit of length 386.159 2678e-15 0.000 0026e-15 m natural unit of time 1.288 088 6677e-21 0.000 000 0086e-21 s atomic unit of charge 1.602 176 53e-19 0.000 000 14e-19 C atomic unit of mass 9.109 3826e-31 0.000 0016e-31 kg atomic unit of action 1.054 571 68e-34 
0.000 000 18e-34 J s atomic unit of length 0.529 177 2108e-10 0.000 000 0018e-10 m atomic unit of energy 4.359 744 17e-18 0.000 000 75e-18 J atomic unit of time 2.418 884 326 505e-17 0.000 000 000 016e-17 s atomic unit of force 8.238 7225e-8 0.000 0014e-8 N atomic unit of velocity 2.187 691 2633e6 0.000 000 0073e6 m s^-1 atomic unit of momentum 1.992 851 66e-24 0.000 000 34e-24 kg m s^-1 atomic unit of current 6.623 617 82e-3 0.000 000 57e-3 A atomic unit of charge density 1.081 202 317e12 0.000 000 093e12 C m^-3 atomic unit of electric potential 27.211 3845 0.000 0023 V atomic unit of electric field 5.142 206 42e11 0.000 000 44e11 V m^-1 atomic unit of electric field gradient 9.717 361 82e21 0.000 000 83e21 V m^-2 atomic unit of electric dipole moment 8.478 353 09e-30 0.000 000 73e-30 C m atomic unit of electric quadrupole moment 4.486 551 24e-40 0.000 000 39e-40 C m^2 atomic unit of electric polarizablity 1.648 777 274e-41 0.000 000 016e-41 C^2 m^2 J^-1 atomic unit of 1st hyperpolarizablity 3.206 361 51e-53 0.000 000 28e-53 C^3 m^3 J^-2 atomic unit of 2nd hyperpolarizablity 6.235 3808e-65 0.000 0011e-65 C^4 m^4 J^-3 atomic unit of magn. flux density 2.350 517 42e5 0.000 000 20e5 T atomic unit of magn. dipole moment 1.854 801 90e-23 0.000 000 16e-23 J T^-1 atomic unit of magnetizability 7.891 036 60e-29 0.000 000 13e-29 J T^-2 atomic unit of permittivity 1.112 650 056...e-10 0 F m^-1 joule-kilogram relationship 1.112 650 056...e-17 0 kg joule-inverse meter relationship 5.034 117 20e24 0.000 000 86e24 m^-1 joule-hertz relationship 1.509 190 37e33 0.000 000 26e33 Hz joule-kelvin relationship 7.242 963e22 0.000 013e22 K joule-electron volt relationship 6.241 509 47e18 0.000 000 53e18 eV joule-atomic mass unit relationship 6.700 5361e9 0.000 0011e9 u joule-hartree relationship 2.293 712 57e17 0.000 000 39e17 E_h kilogram-joule relationship 8.987 551 787...e16 0 J kilogram-inverse meter relationship 4.524 438 91e41 0.000 000 77e41 m^-1 kilogram-hertz relationship 1.356 392 66e50 0.000 000 23e50 Hz kilogram-kelvin relationship 6.509 650e39 0.000 011e39 K kilogram-electron volt relationship 5.609 588 96e35 0.000 000 48e35 eV kilogram-atomic mass unit relationship 6.022 1415e26 0.000 0010e26 u kilogram-hartree relationship 2.061 486 05e34 0.000 000 35e34 E_h inverse meter-joule relationship 1.986 445 61e-25 0.000 000 34e-25 J inverse meter-kilogram relationship 2.210 218 81e-42 0.000 000 38e-42 kg inverse meter-hertz relationship 299 792 458 0 Hz inverse meter-kelvin relationship 1.438 7752e-2 0.000 0025e-2 K inverse meter-electron volt relationship 1.239 841 91e-6 0.000 000 11e-6 eV inverse meter-atomic mass unit relationship 1.331 025 0506e-15 0.000 000 0089e-15 u inverse meter-hartree relationship 4.556 335 252 760e-8 0.000 000 000 030e-8 E_h hertz-joule relationship 6.626 0693e-34 0.000 0011e-34 J hertz-kilogram relationship 7.372 4964e-51 0.000 0013e-51 kg hertz-inverse meter relationship 3.335 640 951...e-9 0 m^-1 hertz-kelvin relationship 4.799 2374e-11 0.000 0084e-11 K hertz-electron volt relationship 4.135 667 43e-15 0.000 000 35e-15 eV hertz-atomic mass unit relationship 4.439 821 667e-24 0.000 000 030e-24 u hertz-hartree relationship 1.519 829 846 006e-16 0.000 000 000 010e-16 E_h kelvin-joule relationship 1.380 6505e-23 0.000 0024e-23 J kelvin-kilogram relationship 1.536 1808e-40 0.000 0027e-40 kg kelvin-inverse meter relationship 69.503 56 0.000 12 m^-1 kelvin-hertz relationship 2.083 6644e10 0.000 0036e10 Hz kelvin-electron volt relationship 8.617 343e-5 0.000 015e-5 eV 
kelvin-atomic mass unit relationship 9.251 098e-14 0.000 016e-14 u kelvin-hartree relationship 3.166 8153e-6 0.000 0055e-6 E_h electron volt-joule relationship 1.602 176 53e-19 0.000 000 14e-19 J electron volt-kilogram relationship 1.782 661 81e-36 0.000 000 15e-36 kg electron volt-inverse meter relationship 8.065 544 45e5 0.000 000 69e5 m^-1 electron volt-hertz relationship 2.417 989 40e14 0.000 000 21e14 Hz electron volt-kelvin relationship 1.160 4505e4 0.000 0020e4 K electron volt-atomic mass unit relationship 1.073 544 171e-9 0.000 000 092e-9 u electron volt-hartree relationship 3.674 932 45e-2 0.000 000 31e-2 E_h atomic mass unit-joule relationship 1.492 417 90e-10 0.000 000 26e-10 J atomic mass unit-kilogram relationship 1.660 538 86e-27 0.000 000 28e-27 kg atomic mass unit-inverse meter relationship 7.513 006 608e14 0.000 000 050e14 m^-1 atomic mass unit-hertz relationship 2.252 342 718e23 0.000 000 015e23 Hz atomic mass unit-kelvin relationship 1.080 9527e13 0.000 0019e13 K atomic mass unit-electron volt relationship 931.494 043e6 0.000 080e6 eV atomic mass unit-hartree relationship 3.423 177 686e7 0.000 000 023e7 E_h hartree-joule relationship 4.359 744 17e-18 0.000 000 75e-18 J hartree-kilogram relationship 4.850 869 60e-35 0.000 000 83e-35 kg hartree-inverse meter relationship 2.194 746 313 705e7 0.000 000 000 015e7 m^-1 hartree-hertz relationship 6.579 683 920 721e15 0.000 000 000 044e15 Hz hartree-kelvin relationship 3.157 7465e5 0.000 0055e5 K hartree-electron volt relationship 27.211 3845 0.000 0023 eV hartree-atomic mass unit relationship 2.921 262 323e-8 0.000 000 019e-8 u""" # parse into a dict physical_constants = {} for line in txt.split('\n'): name = line[:55].rstrip().replace('magn.', 'magnetic') val = line[55:77].replace(' ', '').replace('...', '') val = float(val) uncert = line[77:99].replace(' ', '') uncert = float(uncert) units = line[99:].rstrip() physical_constants[name] = (val, units, uncert) def value(key): """value indexed by key""" return physical_constants[key][0] def unit(key): """unit indexed by key""" return physical_constants[key][1] def precision(key): """relative precision indexed by key""" return physical_constants[key][2] / physical_constants[key][0] def find(sub): """list all keys containing the string sub""" l_sub = string.lower(sub) result = [] for key in physical_constants: l_key = string.lower(key) if l_sub in l_key: result.append(key) result.sort() for key in result: print(key) # table is lacking some digits for exact values: calculate from definition c = value('speed of light in vacuum') mu0 = 4e-7 * pi epsilon0 = 1 / (mu0 * c * c) exact_values = { 'magnetic constant': (mu0, 'N A^-2', 0.0), 'electric constant': (epsilon0, 'F m^-1', 0.0), 'characteristic impedance of vacuum': (sqrt(mu0 / epsilon0), 'ohm', 0.0), # is that the definition? 'atomic unit of permittivity': (4 * epsilon0 * pi, 'F m^-1', 0.0), 'joule-kilogram relationship': (1 / (c * c), 'kg', 0.0), 'kilogram-joule relationship': (c * c, 'J', 0.0), 'hertz-inverse meter relationship': (1 / c, 'm^-1', 0.0) } # sanity check for key in exact_values: assert (exact_values[key][0] - value(key)) / value(key) < 1e-9 physical_constants.update(exact_values) # check update for key in exact_values: assert (exact_values[key][0] - value(key)) / value(key) == 0
adelq/thermopy
thermochem/codata.py
Python
bsd-3-clause
34,412
[ "Avogadro" ]
bbcb8e0c6529d37c4c69db8b29399d16697e89681c5b30978508ab7e0314420c
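As a quick consistency check on the table above, the fine-structure constant can be recomputed from the quoted 2002 values of the elementary charge, hbar, c and the electric constant; a small self-contained sketch (the numbers are copied from the table, the tolerance is illustrative):

from math import pi

# alpha = e^2 / (4 * pi * eps0 * hbar * c), using the 2002 CODATA values above.
e = 1.60217653e-19        # elementary charge, C
hbar = 1.05457168e-34     # Planck constant over 2 pi, J s
c = 299792458.0           # speed of light in vacuum, m s^-1
eps0 = 8.854187817e-12    # electric constant, F m^-1

alpha = e ** 2 / (4 * pi * eps0 * hbar * c)
alpha_table = 7.297352568e-3

assert abs(alpha - alpha_table) / alpha_table < 1e-6
print(alpha)  # ~ 7.29735e-3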
import logging import urllib from functools import partial from django.conf import settings from django.core.context_processors import csrf from django.core.exceptions import PermissionDenied from django.core.urlresolvers import reverse from django.contrib.auth.models import User from django.contrib.auth.decorators import login_required from django.http import Http404, HttpResponse, HttpResponseRedirect from django.shortcuts import redirect from mitxmako.shortcuts import render_to_response, render_to_string from django_future.csrf import ensure_csrf_cookie from django.views.decorators.cache import cache_control from markupsafe import escape from courseware import grades from courseware.access import has_access from courseware.courses import (get_courses, get_course_with_access, get_courses_by_university, sort_by_announcement) import courseware.tabs as tabs from courseware.masquerade import setup_masquerade from courseware.model_data import FieldDataCache from .module_render import toc_for_course, get_module_for_descriptor, get_module from courseware.models import StudentModule, StudentModuleHistory from course_modes.models import CourseMode from django_comment_client.utils import get_discussion_title from student.models import UserTestGroup, CourseEnrollment from util.cache import cache, cache_if_anonymous from xmodule.modulestore import Location from xmodule.modulestore.django import modulestore from xmodule.modulestore.exceptions import InvalidLocationError, ItemNotFoundError, NoPathToItem from xmodule.modulestore.search import path_to_location from xmodule.course_module import CourseDescriptor import shoppingcart import comment_client log = logging.getLogger("mitx.courseware") template_imports = {'urllib': urllib} def user_groups(user): """ TODO (vshnayder): This is not used. When we have a new plan for groups, adjust appropriately. """ if not user.is_authenticated(): return [] # TODO: Rewrite in Django key = 'user_group_names_{user.id}'.format(user=user) cache_expiration = 60 * 60 # one hour # Kill caching on dev machines -- we switch groups a lot group_names = cache.get(key) if settings.DEBUG: group_names = None if group_names is None: group_names = [u.name for u in UserTestGroup.objects.filter(users=user)] cache.set(key, group_names, cache_expiration) return group_names @ensure_csrf_cookie @cache_if_anonymous def courses(request): """ Render "find courses" page. The course selection work is done in courseware.courses. """ courses = get_courses(request.user, request.META.get('HTTP_HOST')) courses = sort_by_announcement(courses) return render_to_response("courseware/courses.html", {'courses': courses}) def render_accordion(request, course, chapter, section, field_data_cache): """ Draws navigation bar. Takes current position in accordion as parameter. If chapter and section are '' or None, renders a default accordion. course, chapter, and section are the url_names. Returns the html string """ # grab the table of contents user = User.objects.prefetch_related("groups").get(id=request.user.id) request.user = user # keep just one instance of User toc = toc_for_course(user, request, course, chapter, section, field_data_cache) context = dict([('toc', toc), ('course_id', course.id), ('csrf', csrf(request)['csrf_token']), ('due_date_display_format', course.due_date_display_format)] + template_imports.items()) return render_to_string('courseware/accordion.html', context) def get_current_child(xmodule): """ Get the xmodule.position's display item of an xmodule that has a position and children. 
If xmodule has no position or is out of bounds, return the first child. Returns None only if there are no children at all. """ if not hasattr(xmodule, 'position'): return None if xmodule.position is None: pos = 0 else: # position is 1-indexed. pos = xmodule.position - 1 children = xmodule.get_display_items() if 0 <= pos < len(children): child = children[pos] elif len(children) > 0: # Something is wrong. Default to first child child = children[0] else: child = None return child def redirect_to_course_position(course_module): """ Return a redirect to the user's current place in the course. If this is the user's first time, redirects to COURSE/CHAPTER/SECTION. If this isn't the users's first time, redirects to COURSE/CHAPTER, and the view will find the current section and display a message about reusing the stored position. If there is no current position in the course or chapter, then selects the first child. """ urlargs = {'course_id': course_module.descriptor.id} chapter = get_current_child(course_module) if chapter is None: # oops. Something bad has happened. raise Http404("No chapter found when loading current position in course") urlargs['chapter'] = chapter.url_name if course_module.position is not None: return redirect(reverse('courseware_chapter', kwargs=urlargs)) # Relying on default of returning first child section = get_current_child(chapter) if section is None: raise Http404("No section found when loading current position in course") urlargs['section'] = section.url_name return redirect(reverse('courseware_section', kwargs=urlargs)) def save_child_position(seq_module, child_name): """ child_name: url_name of the child """ for position, c in enumerate(seq_module.get_display_items(), start=1): if c.url_name == child_name: # Only save if position changed if position != seq_module.position: seq_module.position = position # Save this new position to the underlying KeyValueStore seq_module.save() def check_for_active_timelimit_module(request, course_id, course): """ Looks for a timing module for the given user and course that is currently active. If found, returns a context dict with timer-related values to enable display of time remaining. 
""" context = {} # TODO (cpennington): Once we can query the course structure, replace this with such a query timelimit_student_modules = StudentModule.objects.filter(student=request.user, course_id=course_id, module_type='timelimit') if timelimit_student_modules: for timelimit_student_module in timelimit_student_modules: # get the corresponding section_descriptor for the given StudentModel entry: module_state_key = timelimit_student_module.module_state_key timelimit_descriptor = modulestore().get_instance(course_id, Location(module_state_key)) timelimit_module_cache = FieldDataCache.cache_for_descriptor_descendents(course.id, request.user, timelimit_descriptor, depth=None) timelimit_module = get_module_for_descriptor(request.user, request, timelimit_descriptor, timelimit_module_cache, course.id, position=None) if timelimit_module is not None and timelimit_module.category == 'timelimit' and \ timelimit_module.has_begun and not timelimit_module.has_ended: location = timelimit_module.location # determine where to go when the timer expires: if timelimit_descriptor.time_expired_redirect_url is None: raise Http404("no time_expired_redirect_url specified at this location: {} ".format(timelimit_module.location)) context['time_expired_redirect_url'] = timelimit_descriptor.time_expired_redirect_url # Fetch the remaining time relative to the end time as stored in the module when it was started. # This value should be in milliseconds. remaining_time = timelimit_module.get_remaining_time_in_ms() context['timer_expiration_duration'] = remaining_time context['suppress_toplevel_navigation'] = timelimit_descriptor.suppress_toplevel_navigation return_url = reverse('jump_to', kwargs={'course_id': course_id, 'location': location}) context['timer_navigation_return_url'] = return_url return context def update_timelimit_module(user, course_id, field_data_cache, timelimit_descriptor, timelimit_module): """ Updates the state of the provided timing module, starting it if it hasn't begun. Returns dict with timer-related values to enable display of time remaining. Returns 'timer_expiration_duration' in dict if timer is still active, and not if timer has expired. """ context = {} # determine where to go when the exam ends: if timelimit_descriptor.time_expired_redirect_url is None: raise Http404("No time_expired_redirect_url specified at this location: {} ".format(timelimit_module.location)) context['time_expired_redirect_url'] = timelimit_descriptor.time_expired_redirect_url if not timelimit_module.has_ended: if not timelimit_module.has_begun: # user has not started the exam, so start it now. if timelimit_descriptor.duration is None: raise Http404("No duration specified at this location: {} ".format(timelimit_module.location)) # The user may have an accommodation that has been granted to them. # This accommodation information should already be stored in the module's state. timelimit_module.begin(timelimit_descriptor.duration) # the exam has been started, either because the student is returning to the # exam page, or because they have just visited it. Fetch the remaining time relative to the # end time as stored in the module when it was started. 
context['timer_expiration_duration'] = timelimit_module.get_remaining_time_in_ms() # also use the timed module to determine whether top-level navigation is visible: context['suppress_toplevel_navigation'] = timelimit_descriptor.suppress_toplevel_navigation return context def chat_settings(course, user): """ Returns a dict containing the settings required to connect to a Jabber chat server and room. """ domain = getattr(settings, "JABBER_DOMAIN", None) if domain is None: log.warning('You must set JABBER_DOMAIN in the settings to ' 'enable the chat widget') return None return { 'domain': domain, # Jabber doesn't like slashes, so replace with dashes 'room': "{ID}_class".format(ID=course.id.replace('/', '-')), 'username': "{USER}@{DOMAIN}".format( USER=user.username, DOMAIN=domain ), # TODO: clearly this needs to be something other than the username # should also be something that's not necessarily tied to a # particular course 'password': "{USER}@{DOMAIN}".format( USER=user.username, DOMAIN=domain ), } @login_required @ensure_csrf_cookie @cache_control(no_cache=True, no_store=True, must_revalidate=True) def index(request, course_id, chapter=None, section=None, position=None): """ Displays courseware accordion and associated content. If course, chapter, and section are all specified, renders the page, or returns an error if they are invalid. If section is not specified, displays the accordion opened to the right chapter. If neither chapter or section are specified, redirects to user's most recent chapter, or the first chapter if this is the user's first visit. Arguments: - request : HTTP request - course_id : course id (str: ORG/course/URL_NAME) - chapter : chapter url_name (str) - section : section url_name (str) - position : position in module, eg of <sequential> module (str) Returns: - HTTPresponse """ user = User.objects.prefetch_related("groups").get(id=request.user.id) request.user = user # keep just one instance of User course = get_course_with_access(user, course_id, 'load', depth=2) staff_access = has_access(user, course, 'staff') registered = registered_for_course(course, user) if not registered: # TODO (vshnayder): do course instructors need to be registered to see course? log.debug('User %s tried to view course %s but is not enrolled' % (user, course.location.url())) return redirect(reverse('about_course', args=[course.id])) masq = setup_masquerade(request, staff_access) try: field_data_cache = FieldDataCache.cache_for_descriptor_descendents( course.id, user, course, depth=2) course_module = get_module_for_descriptor(user, request, course, field_data_cache, course.id) if course_module is None: log.warning('If you see this, something went wrong: if we got this' ' far, should have gotten a course module for this user') return redirect(reverse('about_course', args=[course.id])) if chapter is None: return redirect_to_course_position(course_module) context = { 'csrf': csrf(request)['csrf_token'], 'accordion': render_accordion(request, course, chapter, section, field_data_cache), 'COURSE_TITLE': course.display_name_with_default, 'course': course, 'init': '', 'content': '', 'staff_access': staff_access, 'masquerade': masq, 'xqa_server': settings.MITX_FEATURES.get('USE_XQA_SERVER', 'http://xqa:server@content-qa.mitx.mit.edu/xqa') } # Only show the chat if it's enabled by the course and in the # settings. 
show_chat = course.show_chat and settings.MITX_FEATURES['ENABLE_CHAT'] if show_chat: context['chat'] = chat_settings(course, user) # If we couldn't load the chat settings, then don't show # the widget in the courseware. if context['chat'] is None: show_chat = False context['show_chat'] = show_chat chapter_descriptor = course.get_child_by(lambda m: m.url_name == chapter) if chapter_descriptor is not None: save_child_position(course_module, chapter) else: raise Http404('No chapter descriptor found with name {}'.format(chapter)) chapter_module = course_module.get_child_by(lambda m: m.url_name == chapter) if chapter_module is None: # User may be trying to access a chapter that isn't live yet if masq=='student': # if staff is masquerading as student be kinder, don't 404 log.debug('staff masq as student: no chapter %s' % chapter) return redirect(reverse('courseware', args=[course.id])) raise Http404 if section is not None: section_descriptor = chapter_descriptor.get_child_by(lambda m: m.url_name == section) if section_descriptor is None: # Specifically asked-for section doesn't exist if masq=='student': # if staff is masquerading as student be kinder, don't 404 log.debug('staff masq as student: no section %s' % section) return redirect(reverse('courseware', args=[course.id])) raise Http404 # cdodge: this looks silly, but let's refetch the section_descriptor with depth=None # which will prefetch the children more efficiently than doing a recursive load section_descriptor = modulestore().get_instance(course.id, section_descriptor.location, depth=None) # Load all descendants of the section, because we're going to display its # html, which in general will need all of its children section_field_data_cache = FieldDataCache.cache_for_descriptor_descendents( course_id, user, section_descriptor, depth=None) section_module = get_module_for_descriptor(request.user, request, section_descriptor, section_field_data_cache, course_id, position ) if section_module is None: # User may be trying to be clever and access something # they don't have access to. raise Http404 # Save where we are in the chapter save_child_position(chapter_module, section) # check here if this section *is* a timed module. if section_module.category == 'timelimit': timer_context = update_timelimit_module(user, course_id, student_module_cache, section_descriptor, section_module) if 'timer_expiration_duration' in timer_context: context.update(timer_context) else: # if there is no expiration defined, then we know the timer has expired: return HttpResponseRedirect(timer_context['time_expired_redirect_url']) else: # check here if this page is within a course that has an active timed module running. 
If so, then # add in the appropriate timer information to the rendering context: context.update(check_for_active_timelimit_module(request, course_id, course)) context['content'] = section_module.runtime.render(section_module, None, 'student_view').content else: # section is none, so display a message prev_section = get_current_child(chapter_module) if prev_section is None: # Something went wrong -- perhaps this chapter has no sections visible to the user raise Http404 prev_section_url = reverse('courseware_section', kwargs={'course_id': course_id, 'chapter': chapter_descriptor.url_name, 'section': prev_section.url_name}) context['content'] = render_to_string('courseware/welcome-back.html', {'course': course, 'chapter_module': chapter_module, 'prev_section': prev_section, 'prev_section_url': prev_section_url}) result = render_to_response('courseware/courseware.html', context) except Exception as e: if isinstance(e, Http404): # let it propagate raise # In production, don't want to let a 500 out for any reason if settings.DEBUG: raise else: log.exception("Error in index view: user={user}, course={course}," " chapter={chapter} section={section}" "position={position}".format( user=user, course=course, chapter=chapter, section=section, position=position )) try: result = render_to_response('courseware/courseware-error.html', {'staff_access': staff_access, 'course': course}) except: # Let the exception propagate, relying on global config to at # at least return a nice error message log.exception("Error while rendering courseware-error page") raise return result @ensure_csrf_cookie def jump_to_id(request, course_id, module_id): """ This entry point allows for a shorter version of a jump to where just the id of the element is passed in. This assumes that id is unique within the course_id namespace """ course_location = CourseDescriptor.id_to_location(course_id) items = modulestore().get_items( ['i4x', course_location.org, course_location.course, None, module_id], course_id=course_id ) if len(items) == 0: raise Http404("Could not find id = {0} in course_id = {1}. Referer = {2}". format(module_id, course_id, request.META.get("HTTP_REFERER", ""))) if len(items) > 1: log.warning("Multiple items found with id = {0} in course_id = {1}. Referer = {2}. Using first found {3}...". format(module_id, course_id, request.META.get("HTTP_REFERER", ""), items[0].location.url())) return jump_to(request, course_id, items[0].location.url()) @ensure_csrf_cookie def jump_to(request, course_id, location): """ Show the page that contains a specific location. If the location is invalid or not in any class, return a 404. Otherwise, delegates to the index view to figure out whether this user has access, and what they should see. """ # Complain if the location isn't valid try: location = Location(location) except InvalidLocationError: raise Http404("Invalid location") # Complain if there's not data for this location try: (course_id, chapter, section, position) = path_to_location(modulestore(), course_id, location) except ItemNotFoundError: raise Http404("No data at this location: {0}".format(location)) except NoPathToItem: raise Http404("This location is not in any class: {0}".format(location)) # choose the appropriate view (and provide the necessary args) based on the # args provided by the redirect. # Rely on index to do all error handling and access control. 
if chapter is None: return redirect('courseware', course_id=course_id) elif section is None: return redirect('courseware_chapter', course_id=course_id, chapter=chapter) elif position is None: return redirect('courseware_section', course_id=course_id, chapter=chapter, section=section) else: return redirect('courseware_position', course_id=course_id, chapter=chapter, section=section, position=position) @ensure_csrf_cookie def course_info(request, course_id): """ Display the course's info.html, or 404 if there is no such course. Assumes the course_id is in a valid format. """ course = get_course_with_access(request.user, course_id, 'load') staff_access = has_access(request.user, course, 'staff') masq = setup_masquerade(request, staff_access) # allow staff to toggle masquerade on info page return render_to_response('courseware/info.html', {'request': request, 'course_id': course_id, 'cache': None, 'course': course, 'staff_access': staff_access, 'masquerade': masq}) @ensure_csrf_cookie def static_tab(request, course_id, tab_slug): """ Display the courses tab with the given name. Assumes the course_id is in a valid format. """ course = get_course_with_access(request.user, course_id, 'load') tab = tabs.get_static_tab_by_slug(course, tab_slug) if tab is None: raise Http404 contents = tabs.get_static_tab_contents( request, course, tab ) if contents is None: raise Http404 staff_access = has_access(request.user, course, 'staff') return render_to_response('courseware/static_tab.html', {'course': course, 'tab': tab, 'tab_contents': contents, 'staff_access': staff_access, }) # TODO arjun: remove when custom tabs in place, see courseware/syllabus.py @ensure_csrf_cookie def syllabus(request, course_id): """ Display the course's syllabus.html, or 404 if there is no such course. Assumes the course_id is in a valid format. """ course = get_course_with_access(request.user, course_id, 'load') staff_access = has_access(request.user, course, 'staff') return render_to_response('courseware/syllabus.html', {'course': course, 'staff_access': staff_access, }) def registered_for_course(course, user): """ Return True if user is registered for course, else False """ if user is None: return False if user.is_authenticated(): return CourseEnrollment.is_enrolled(user, course.id) else: return False @ensure_csrf_cookie @cache_if_anonymous def course_about(request, course_id): if settings.MITX_FEATURES.get('ENABLE_MKTG_SITE', False): raise Http404 course = get_course_with_access(request.user, course_id, 'see_exists') registered = registered_for_course(course, request.user) if has_access(request.user, course, 'load'): course_target = reverse('info', args=[course.id]) else: course_target = reverse('about_course', args=[course.id]) show_courseware_link = (has_access(request.user, course, 'load') or settings.MITX_FEATURES.get('ENABLE_LMS_MIGRATION')) # Note: this is a flow for payment for course registration, not the Verified Certificate flow. 
registration_price = 0 in_cart = False reg_then_add_to_cart_link = "" if (settings.MITX_FEATURES.get('ENABLE_SHOPPING_CART') and settings.MITX_FEATURES.get('ENABLE_PAID_COURSE_REGISTRATION')): registration_price = CourseMode.min_course_price_for_currency(course_id, settings.PAID_COURSE_REGISTRATION_CURRENCY[0]) if request.user.is_authenticated(): cart = shoppingcart.models.Order.get_cart_for_user(request.user) in_cart = shoppingcart.models.PaidCourseRegistration.contained_in_order(cart, course_id) reg_then_add_to_cart_link = "{reg_url}?course_id={course_id}&enrollment_action=add_to_cart".format( reg_url=reverse('register_user'), course_id=course.id) return render_to_response('courseware/course_about.html', {'course': course, 'registered': registered, 'course_target': course_target, 'registration_price': registration_price, 'in_cart': in_cart, 'reg_then_add_to_cart_link': reg_then_add_to_cart_link, 'show_courseware_link': show_courseware_link}) @ensure_csrf_cookie @cache_if_anonymous def mktg_course_about(request, course_id): """ This is the button that gets put into an iframe on the Drupal site """ try: course = get_course_with_access(request.user, course_id, 'see_exists') except (ValueError, Http404) as e: # if a course does not exist yet, display a coming # soon button return render_to_response('courseware/mktg_coming_soon.html', {'course_id': course_id}) registered = registered_for_course(course, request.user) if has_access(request.user, course, 'load'): course_target = reverse('info', args=[course.id]) else: course_target = reverse('about_course', args=[course.id]) allow_registration = has_access(request.user, course, 'enroll') show_courseware_link = (has_access(request.user, course, 'load') or settings.MITX_FEATURES.get('ENABLE_LMS_MIGRATION')) course_modes = CourseMode.modes_for_course(course.id) return render_to_response('courseware/mktg_course_about.html', { 'course': course, 'registered': registered, 'allow_registration': allow_registration, 'course_target': course_target, 'show_courseware_link': show_courseware_link, 'course_modes': course_modes, }) def render_notifications(request, course, notifications): context = { 'notifications': notifications, 'get_discussion_title': partial(get_discussion_title, request=request, course=course), 'course': course, } return render_to_string('courseware/notifications.html', context) @login_required def news(request, course_id): course = get_course_with_access(request.user, course_id, 'load') notifications = comment_client.get_notifications(request.user.id) context = { 'course': course, 'content': render_notifications(request, course, notifications), } return render_to_response('courseware/news.html', context) @login_required @cache_control(no_cache=True, no_store=True, must_revalidate=True) def progress(request, course_id, student_id=None): """ User progress. We show the grade bar and every problem score. Course staff are allowed to see the progress of students in their class. """ course = get_course_with_access(request.user, course_id, 'load', depth=None) staff_access = has_access(request.user, course, 'staff') if student_id is None or student_id == request.user.id: # always allowed to see your own profile student = request.user else: # Requesting access to a different student's profile if not staff_access: raise Http404 student = User.objects.get(id=int(student_id)) # NOTE: To make sure impersonation by instructor works, use # student instead of request.user in the rest of the function. 
# The pre-fetching of groups is done to make auth checks not require an # additional DB lookup (this kills the Progress page in particular). student = User.objects.prefetch_related("groups").get(id=student.id) field_data_cache = FieldDataCache.cache_for_descriptor_descendents( course_id, student, course, depth=None) courseware_summary = grades.progress_summary(student, request, course, field_data_cache) grade_summary = grades.grade(student, request, course, field_data_cache) if courseware_summary is None: #This means the student didn't have access to the course (which the instructor requested) raise Http404 context = {'course': course, 'courseware_summary': courseware_summary, 'grade_summary': grade_summary, 'staff_access': staff_access, 'student': student, } context.update() return render_to_response('courseware/progress.html', context) @login_required def submission_history(request, course_id, student_username, location): """Render an HTML fragment (meant for inclusion elsewhere) that renders a history of all state changes made by this user for this problem location. Right now this only works for problems because that's all StudentModuleHistory records. """ course = get_course_with_access(request.user, course_id, 'load') staff_access = has_access(request.user, course, 'staff') # Permission Denied if they don't have staff access and are trying to see # somebody else's submission history. if (student_username != request.user.username) and (not staff_access): raise PermissionDenied try: student = User.objects.get(username=student_username) student_module = StudentModule.objects.get(course_id=course_id, module_state_key=location, student_id=student.id) except User.DoesNotExist: return HttpResponse(escape("User {0} does not exist.".format(student_username))) except StudentModule.DoesNotExist: return HttpResponse(escape("{0} has never accessed problem {1}".format(student_username, location))) history_entries = StudentModuleHistory.objects.filter( student_module=student_module ).order_by('-id') # If no history records exist, let's force a save to get history started. if not history_entries: student_module.save() history_entries = StudentModuleHistory.objects.filter( student_module=student_module ).order_by('-id') context = { 'history_entries': history_entries, 'username': student.username, 'location': location, 'course_id': course_id } return render_to_response('courseware/submission_history.html', context)
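# Illustration only (not part of the original views.py): the jump_to view above resolves a
# Location to a (course_id, chapter, section, position) tuple and then redirects to the most
# specific courseware URL it can. The helper below is a hypothetical, self-contained sketch of
# that fall-through order; the URL pattern names mirror the strings used in the view, but the
# function itself does not exist in the edx-platform code.
def _pick_courseware_url_name(chapter, section, position):
    """Return the URL pattern name jump_to would redirect to for the given path parts."""
    if chapter is None:
        return 'courseware'                # bare course index
    if section is None:
        return 'courseware_chapter'        # accordion opened to the requested chapter
    if position is None:
        return 'courseware_section'        # section without a specific unit position
    return 'courseware_position'           # exact position inside the section

assert _pick_courseware_url_name(None, None, None) == 'courseware'
assert _pick_courseware_url_name('week-1', 'intro', '3') == 'courseware_position'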
heran7/edx-platform
lms/djangoapps/courseware/views.py
Python
agpl-3.0
33,202
[ "VisIt" ]
9ce1e0763a215348fe62d2f90cdddd04f0d861dd80ce2ca4095426abcb57b551
# Copyright 2014, Brian Coca <bcoca@ansible.com> # Copyright 2017, Ken Celenza <ken@networktocode.com> # Copyright 2017, Jason Edelman <jason@networktocode.com> # Copyright 2017, Ansible Project # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type import collections import itertools import math from jinja2.filters import environmentfilter from ansible.errors import AnsibleFilterError from ansible.module_utils import basic from ansible.module_utils.six import binary_type, text_type from ansible.module_utils.six.moves import zip, zip_longest from ansible.module_utils._text import to_native, to_text try: from jinja2.filters import do_unique HAS_UNIQUE = True except ImportError: HAS_UNIQUE = False try: from __main__ import display except ImportError: from ansible.utils.display import Display display = Display() @environmentfilter def unique(environment, a, case_sensitive=False, attribute=None): error = None try: if HAS_UNIQUE: c = set(do_unique(environment, a, case_sensitive=case_sensitive, attribute=attribute)) except Exception as e: if case_sensitive or attribute: raise AnsibleFilterError("Jinja2's unique filter failed and we cannot fall back to Ansible's version " "as it does not support the parameters supplied", orig_exc=e) else: display.warning('Falling back to Ansible unique filter as Jinaj2 one failed: %s' % to_text(e)) error = e if not HAS_UNIQUE or error: # handle Jinja2 specific attributes when using Ansible's version if case_sensitive or attribute: raise AnsibleFilterError("Ansible's unique filter does not support case_sensitive nor attribute parameters, " "you need a newer version of Jinja2 that provides their version of the filter.") if isinstance(a, collections.Hashable): c = set(a) else: c = [] for x in a: if x not in c: c.append(x) return c @environmentfilter def intersect(environment, a, b): if isinstance(a, collections.Hashable) and isinstance(b, collections.Hashable): c = set(a) & set(b) else: c = unique(environment, [x for x in a if x in b]) return c @environmentfilter def difference(environment, a, b): if isinstance(a, collections.Hashable) and isinstance(b, collections.Hashable): c = set(a) - set(b) else: c = unique(environment, [x for x in a if x not in b]) return c @environmentfilter def symmetric_difference(environment, a, b): if isinstance(a, collections.Hashable) and isinstance(b, collections.Hashable): c = set(a) ^ set(b) else: isect = intersect(environment, a, b) c = [x for x in union(environment, a, b) if x not in isect] return c @environmentfilter def union(environment, a, b): if isinstance(a, collections.Hashable) and isinstance(b, collections.Hashable): c = set(a) | set(b) else: c = unique(environment, a + b) return c def min(a): _min = __builtins__.get('min') return _min(a) def max(a): _max = __builtins__.get('max') return _max(a) def logarithm(x, 
base=math.e): try: if base == 10: return math.log10(x) else: return math.log(x, base) except TypeError as e: raise AnsibleFilterError('log() can only be used on numbers: %s' % str(e)) def power(x, y): try: return math.pow(x, y) except TypeError as e: raise AnsibleFilterError('pow() can only be used on numbers: %s' % str(e)) def inversepower(x, base=2): try: if base == 2: return math.sqrt(x) else: return math.pow(x, 1.0 / float(base)) except (ValueError, TypeError) as e: raise AnsibleFilterError('root() can only be used on numbers: %s' % str(e)) def human_readable(size, isbits=False, unit=None): ''' Return a human readable string ''' try: return basic.bytes_to_human(size, isbits, unit) except Exception: raise AnsibleFilterError("human_readable() can't interpret following string: %s" % size) def human_to_bytes(size, default_unit=None, isbits=False): ''' Return bytes count from a human readable string ''' try: return basic.human_to_bytes(size, default_unit, isbits) except Exception: raise AnsibleFilterError("human_to_bytes() can't interpret following string: %s" % size) def rekey_on_member(data, key, duplicates='error'): """ Rekey a dict of dicts on another member May also create a dict from a list of dicts. duplicates can be one of ``error`` or ``overwrite`` to specify whether to error out if the key value would be duplicated or to overwrite previous entries if that's the case. """ if duplicates not in ('error', 'overwrite'): raise AnsibleFilterError("duplicates parameter to rekey_on_member has unknown value: {0}".format(duplicates)) new_obj = {} if isinstance(data, collections.Mapping): iterate_over = data.values() elif isinstance(data, collections.Iterable) and not isinstance(data, (text_type, binary_type)): iterate_over = data else: raise AnsibleFilterError("Type is not a valid list, set, or dict") for item in iterate_over: if not isinstance(item, collections.Mapping): raise AnsibleFilterError("List item is not a valid dict") try: key_elem = item[key] except KeyError: raise AnsibleFilterError("Key {0} was not found".format(key)) except Exception as e: raise AnsibleFilterError(to_native(e)) # Note: if new_obj[key_elem] exists it will always be a non-empty dict (it will at # minimun contain {key: key_elem} if new_obj.get(key_elem, None): if duplicates == 'error': raise AnsibleFilterError("Key {0} is not unique, cannot correctly turn into dict".format(key_elem)) elif duplicates == 'overwrite': new_obj[key_elem] = item else: new_obj[key_elem] = item return new_obj class FilterModule(object): ''' Ansible math jinja2 filters ''' def filters(self): filters = { # general math 'min': min, 'max': max, # exponents and logarithms 'log': logarithm, 'pow': power, 'root': inversepower, # set theory 'unique': unique, 'intersect': intersect, 'difference': difference, 'symmetric_difference': symmetric_difference, 'union': union, # combinatorial 'product': itertools.product, 'permutations': itertools.permutations, 'combinations': itertools.combinations, # computer theory 'human_readable': human_readable, 'human_to_bytes': human_to_bytes, 'rekey_on_member': rekey_on_member, # zip 'zip': zip, 'zip_longest': zip_longest, } return filters
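# Illustration only (not part of the original mathstuff.py): rekey_on_member above turns a
# list (or dict) of dicts into a dict keyed on one member, erroring or overwriting when the
# key value repeats. The minimal sketch below re-implements just the list-of-dicts happy path
# in plain Python to make the resulting shape concrete; it is hypothetical and not the Ansible
# implementation.
def _rekey_sketch(items, key, duplicates='error'):
    new_obj = {}
    for item in items:
        key_elem = item[key]
        if key_elem in new_obj and duplicates == 'error':
            raise ValueError('Key %r is not unique' % key_elem)
        new_obj[key_elem] = item
    return new_obj

hosts = [{'name': 'web1', 'ip': '10.0.0.5'}, {'name': 'db1', 'ip': '10.0.0.6'}]
assert _rekey_sketch(hosts, 'name')['db1']['ip'] == '10.0.0.6'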
direvus/ansible
lib/ansible/plugins/filter/mathstuff.py
Python
gpl-3.0
7,996
[ "Brian" ]
e72e367e4175fdc8fd97d917ce38323622102eb9999981a10d2a36ed364b858f
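# Illustration only (not from either source file above): the set-theory filters in the
# mathstuff.py record try set() arithmetic first and fall back to a list scan when the inputs
# cannot be treated as sets (e.g. lists of dicts). The hypothetical helper below sketches the
# same idea with a try/except instead of the collections.Hashable check used in the filter.
def _intersect_sketch(a, b):
    try:
        return list(set(a) & set(b))          # fast path: all elements hashable
    except TypeError:
        return [x for x in a if x in b]       # fallback preserves order and allows dicts

assert sorted(_intersect_sketch([1, 2, 3], [2, 3, 4])) == [2, 3]
assert _intersect_sketch([{'a': 1}, {'b': 2}], [{'b': 2}]) == [{'b': 2}]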
# Copyright 2006 James Tauber and contributors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys from types import StringType import os import copy from cStringIO import StringIO import re try: from hashlib import md5 except: from md5 import md5 import logging import compiler from compiler.visitor import ASTVisitor from options import (all_compile_options, add_compile_options, get_compile_options, debug_options, speed_options, pythonic_options) escaped_subst = re.compile('@{{(!?[ a-zA-Z0-9_\.]*)}}') # See http://www.quackit.com/javascript/javascript_reserved_words.cfm JavaScript_Reserved_Words = frozenset(( 'break', 'case', 'comment', 'continue', 'default', 'delete', 'do', 'else', 'export', 'for', 'function', 'if', 'import', 'in', 'label', 'new', 'return', 'switch', 'this', 'typeof', 'var', 'void', 'while', 'with', )) ECMAScipt_Reserved_Words = frozenset(( 'catch', 'class', 'const', 'debugger', 'enum', 'extends', 'finally', 'super', 'throw', 'try', )) Java_Keywords = frozenset((# (Reserved by JavaScript) 'abstract', 'boolean', 'byte', 'char', 'double', 'false', 'final', 'float', 'goto', 'implements', 'instanceOf', 'int', 'interface', 'long', 'native', 'null', 'package', 'private', 'protected', 'public', 'short', 'static', 'synchronized', 'throws', 'transient', 'true', )) Other_JavaScript_Keywords = frozenset(( 'Anchor', 'Area', 'Array', 'Boolean', 'Button', 'Checkbox', 'Date', 'Document', 'Element', 'FileUpload', 'Form', 'Frame', 'Function', 'Hidden', 'History', 'Image', 'Infinity', 'JavaArray', 'JavaClass', 'JavaObject', 'JavaPackage', 'Link', 'Location', 'Math', 'MimeType', 'NaN', 'Navigator', 'Number', 'Object', 'Option', 'Packages', 'Password', 'Plugin', 'Radio', 'RegExp', 'Reset', 'Select', 'String', 'Submit', 'Text', 'Textarea', 'Window', 'alert', 'arguments', 'assign', 'blur', 'callee', 'caller', 'captureEvents', 'clearInterval', 'clearTimeout', 'close', 'closed', 'confirm', 'constructor', 'defaultStatus', 'document', 'escape', 'eval', 'find', 'focus', 'frames', 'getClass', 'history', 'home', 'innerHeight', 'innerWidth', 'isFinite', 'isNan', 'java', 'length', 'location', 'locationbar', 'menubar', 'moveBy', 'moveTo', 'name', 'navigate', 'navigator', 'netscape', 'onBlur', 'onError', 'onFocus', 'onLoad', 'onUnload', 'open', 'opener', 'outerHeight', 'outerWidth', 'pageXoffset', 'pageYoffset', 'parent', 'parseFloat', 'parseInt', 'personalbar', 'print', 'prompt', 'prototype', 'ref', 'releaseEvents', 'resizeBy', 'resizeTo', 'routeEvent', 'scroll', 'scrollBy', 'scrollTo', 'scrollbars', 'self', 'setInterval', 'setTimeout', 'status', 'statusbar', 'stop', 'sun', 'taint', 'toString', 'toolbar', 'top', 'unescape', 'untaint', 'unwatch', 'valueOf', 'watch', 'window', )) PYJSLIB_BUILTIN_FUNCTIONS=frozenset(( "__import__", "abs", "all", "any", "bool", "callable", "chr", "cmp", "delattr", "dir", "divmod", "enumerate", "filter", "float", "getattr", "hasattr", "hash", "hex", "isinstance", "issubclass", "iter", "len", "map", "max", "min", "oct", "open", "ord", "pow", "range", "reduce", "repr", "reversed", 
"round", "setattr", "sorted", "staticmethod", "str", "sum", "super", "type", "xrange", "zip", # internal mappings needed "__empty_dict", "next_hash_id", "__hash", "wrapped_next", "__iter_prepare", "__wrapped_next", "printFunc", "debugReport", "_isinstance", "op_add", "op_sub", "isObject", "toJSObjects", "_errorMapping", "TryElse", "sprintf", "get_pyjs_classtype", "isUndefined", "_create_class", "_del", "op_is", "op_eq", "op_or", "op_and", "op_uadd", "op_usub", "op_mul", "op_div", "op_truediv", "op_pow", "op_invert", "op_bitshiftleft", "op_bitshiftright", "op_bitand2", "op_bitand", "op_bitxor", "op_bitxor2", "op_bitor2", "op_bitor", "op_floordiv", "op_mod", "__op_add", "__op_sub", "__setslice", "slice", "__delslice", "___import___", "__import_all__", "_globals", "_handle_exception", )) PYJSLIB_BUILTIN_CLASSES=[ "ArithmeticError", "AssertionError", "AttributeError", "BaseException", "Exception", "GeneratorExit", "ImportError", "IndexError", "KeyError", "KeyboardInterrupt", "LookupError", "NameError", "NotImplemented", # is in fact an instance "NotImplementedError", "NotImplementedType", "RuntimeError", "StandardError", "StopIteration", "TypeError", "ValueError", "ZeroDivisionError", "basestring", "dict", "frozenset", "int", "list", "long", "object", "property", "set", "tuple", ] PYJSLIB_BUILTIN_MAPPING = {\ 'True' : 'true', 'False': 'false', 'None': 'null', } SCOPE_KEY = 0 BIND_TYPES_NUMERIC = { "func": 0, "bound": 1, "class": 2, "static": 3, } # Variable names that should be remapped in functions/methods # arguments -> arguments_ # arguments_ -> arguments__ # etc. # arguments is one of Other_JavaScript_Keywords, but is used # in function/method initialization and therefore forbidden pyjs_vars_remap_names = ['arguments', 'final', 'char'] # to pass lint pyjs_vars_remap = {} for a in pyjs_vars_remap_names: pyjs_vars_remap[a] = '$$' + a for a in JavaScript_Reserved_Words: pyjs_vars_remap[a] = '$$' + a for a in ECMAScipt_Reserved_Words: pyjs_vars_remap[a] = '$$' + a # Attributes that should be remapped in classes pyjs_attrib_remap_names = [\ 'prototype', 'call', 'apply', 'constructor', # Specifically for Chrome, which doesn't set the name attribute of a _function_ # http://code.google.com/p/chromium/issues/detail?id=12871 'name', # collisions between javascript/python 'split', 'replace', ] pyjs_attrib_remap = {} for a in pyjs_attrib_remap_names: pyjs_attrib_remap[a] = '$$' + a for a in JavaScript_Reserved_Words: pyjs_attrib_remap[a] = '$$' + a for a in ECMAScipt_Reserved_Words: pyjs_attrib_remap[a] = '$$' + a def bracket_fn(s): return s # "(%s)" % s # pass in the compiler module (lib2to3 pgen or "standard" python one) # and patch transformer. see http://bugs.python.org/issue6978 def monkey_patch_broken_transformer(compiler): if compiler.__name__ != 'compiler': return # don't patch pgen.lib2to3.compiler.transformer! 
# assumes that compiler.transformer imports all these extractLineNo = compiler.transformer.extractLineNo token = compiler.transformer.token symbol = compiler.transformer.symbol Subscript = compiler.transformer.Subscript Tuple = compiler.transformer.Tuple Ellipsis = compiler.transformer.Ellipsis Sliceobj = compiler.transformer.Sliceobj # Bugfix compiler.transformer.Transformer.com_subscriptlist def com_subscriptlist(self, primary, nodelist, assigning): # slicing: simple_slicing | extended_slicing # simple_slicing: primary "[" short_slice "]" # extended_slicing: primary "[" slice_list "]" # slice_list: slice_item ("," slice_item)* [","] # backwards compat slice for '[i:j]' if len(nodelist) == 2: sub = nodelist[1] if (sub[1][0] == token.COLON or \ (len(sub) > 2 and sub[2][0] == token.COLON)) and \ sub[-1][0] != symbol.sliceop: return self.com_slice(primary, sub, assigning) subscripts = [] for i in range(1, len(nodelist), 2): subscripts.append(self.com_subscript(nodelist[i])) if len(nodelist) > 2: tulplesub = [sub for sub in subscripts \ if not (isinstance(sub, Ellipsis) or \ isinstance(sub, Sliceobj))] if len(tulplesub) == len(subscripts): subscripts = [Tuple(subscripts)] return Subscript(primary, assigning, subscripts, lineno=extractLineNo(nodelist)) compiler.transformer.Transformer.com_subscriptlist = com_subscriptlist re_return = re.compile(r'\breturn\b') class __Pyjamas__(object): console = "console" native_js_funcs = [] @classmethod def register_native_js_func(cls, name, func): def native(self, translator, node, current_klass, is_statement=False): if len(node.args) != 1: raise TranslationError( "%s function requires one argument" % name, node.node) if ( isinstance(node.args[0], translator.ast.Const) and isinstance(node.args[0].value, str) ): translator.ignore_debug = True unescape = lambda content: translator.translate_escaped_names(content, current_klass) converted = func(node.args[0].value, unescape=unescape, translator=translator, current_klass=current_klass, is_statement=is_statement) return converted, re_return.search(converted) is not None else: raise TranslationError( "%s function only supports constant strings" % name, node.node) cls.native_js_funcs.append(name) setattr(cls, name, native) def wnd(self, translator, node, *args, **kwargs): if len(node.args) != 0: raise TranslationError( "wnd function doesn't support arguments", node.node) translator.ignore_debug = True return '$wnd', False def doc(self, translator, node, *args, **kwargs): if len(node.args) != 0: raise TranslationError( "doc function doesn't support arguments", node.node) translator.ignore_debug = True return '$doc', False def jsinclude(self, translator, node, *args, **kwargs): if len(node.args) != 1: raise TranslationError( "jsinclude function requires one argument", node.node) if ( isinstance(node.args[0], translator.ast.Const) and isinstance(node.args[0].value, str) ): try: data = open(node.args[0].value, 'r').read() except IOError, e: raise TranslationError( "Cannot include file '%s': %s" % (node.args[0].value, e), node.node) translator.ignore_debug = True return data, False else: raise TranslationError( "jsinclude function only supports constant strings", node.node) def jsimport(self, translator, node, *args, **kwargs): # jsimport(path, mode, location) # mode = [default|static|dynamic] (default: depends on build argument -m) # location = [early|middle|late] (only relevant for static) if len(node.args) == 0 or len(node.args) > 3: raise TranslationError( "jsimport function requires at least one, and at most three 
arguments", node.node) for arg in node.args: if not isinstance(arg, translator.ast.Const): raise TranslationError( "jsimport function only supports constant arguments", node.node) if not isinstance(node.args[0].value, str): raise TranslationError( "jsimport path argument must be a string", node.node) path = node.args[0].value if len(node.args) < 2: mode = 'default' else: if isinstance(node.args[1].value, str): mode = node.args[1].value else: raise TranslationError( "jsimport path argument must be a string", node.node) if not mode in ['default', 'static', 'dynamic']: raise TranslationError( "jsimport mode argument must be default, static or dynamic", node.node) if len(node.args) < 3: location = 'middle' else: if isinstance(node.args[2].value, str): location = node.args[2].value else: raise TranslationError( "jsimport path argument must be a string", node.node) if not location in ['early', 'middle', 'late']: raise TranslationError( "jsimport location argument must be early, middle or late", node.node) translator.add_imported_js(path, mode, location) translator.ignore_debug = True return '', False def debugger(self, translator, node, *args, **kwargs): if len(node.args) != 0: raise TranslationError( "debugger function doesn't support arguments", node.node) translator.ignore_debug = True return 'debugger', False def setCompilerOptions(self, translator, node, *args, **kwargs): global speed_options, pythonic_options for arg in node.args: if not isinstance(arg, translator.ast.Const) or not isinstance(arg.value, str): raise TranslationError( "jsimport function only supports constant string arguments", node.node) option = arg.value if translator.decorator_compiler_options.has_key(option): for var, val in translator.decorator_compiler_options[option]: setattr(translator, var, val) elif option == "Speed": for var in speed_options: setattr(translator, var, speed_options[var]) elif option == "Strict": for var in pythonic_options: setattr(translator, var, pythonic_options[var]) else: raise TranslationError( "setCompilerOptions invalid option '%s'" % option, node.node) translator.ignore_debug = True return '', False def INT(self, translator, node, *args, **kwargs): if len(node.args) != 1: raise TranslationError( "INT function requires one argument", node.node) expr = translator.expr(node.args[0], None) opt_var = translator.decorator_compiler_options['NumberClasses'][0][0] if getattr(translator, opt_var): return "new $p['int'](%s)" % expr, False return expr, False def native_js_func(func): __Pyjamas__.register_native_js_func(func.__name__, func) return func @native_js_func def JS(content, unescape, **kwargs): return unescape(content) __pyjamas__ = __Pyjamas__() class __Future__(object): def division(self, translator): translator.future_division = True __future__ = __Future__() # This is taken from the django project. # Escape every ASCII character with a value less than 32. 
JS_ESCAPES = ( ('\\', r'\x5C'), ('\'', r'\x27'), ('"', r'\x22'), ('>', r'\x3E'), ('<', r'\x3C'), ('&', r'\x26'), (';', r'\x3B') ) + tuple([('%c' % z, '\\x%02X' % z) for z in range(32)]) def escapejs(value): """Hex encodes characters for use in JavaScript strings.""" for bad, good in JS_ESCAPES: value = value.replace(bad, good) return value class YieldVisitor(ASTVisitor): has_yield = False def visitYield(self, node, *args): self.has_yield = True class GeneratorExitVisitor(YieldVisitor): has_yield = False def visitReturn(self, node, *args): self.has_yield = True class Klass: klasses = {} def __init__(self, name, name_scope): self.name = name self.name_scope = name_scope self.klasses[name] = self self.functions = set() def set_base(self, base_name): self.base = self.klasses.get(base_name) def add_function(self, function_name): self.functions.add(function_name) class TranslationError(Exception): def __init__(self, msg, node='', module_name=''): if node: lineno = node.lineno else: lineno = "Unknown" self.msg = msg self.node = node self.module_name = module_name self.lineno = lineno Exception.__init__(self, "%s line %s:\n%s\n%s" % (module_name, lineno, msg, node)) def __str__(self): return self.args[0] def strip_py(name): return name class Translator(object): decorator_compiler_options = {\ 'Debug': [('debug', True)], 'noDebug': [('debug', False)], 'PrintStatements': [('print_statements', True)], 'noPrintStatements': [('print_statements', False)], 'FunctionArgumentChecking': [('function_argument_checking', True)], 'noFunctionArgumentChecking': [('function_argument_checking', False)], 'AttributeChecking': [('attribute_checking', True)], 'noAttributeChecking': [('attribute_checking', False)], 'GetattrSupport': [('getattr_support', True)], 'noGetattrSupport': [('getattr_support', False)], 'BoundMethods': [('bound_methods', True)], 'noBoundMethods': [('bound_methods', False)], 'Descriptors': [('descriptors', True)], 'noDescriptors': [('descriptors', False)], 'SourceTracking': [('source_tracking', True)], 'noSourceTracking': [('source_tracking', False)], 'LineTracking': [('line_tracking', True)], 'noLineTracking': [('line_tracking', False)], 'StoreSource': [('store_source', True)], 'noStoreSource': [('store_source', False)], 'noInlineBool': [('inline_bool', False)], 'InlineBool': [('inline_bool', True)], 'noInlineLen': [('inline_len', False)], 'InlineLen': [('inline_len', True)], 'noInlineEq': [('inline_eq', False)], 'InlineEq': [('inline_eq', True)], 'noInlineCmp': [('inline_cmp', False)], 'InlineCmp': [('inline_cmp', True)], 'noInlineGetItem': [('inline_getitem', False)], 'InlineGetItem': [('inline_getitem', True)], 'noInlineCode': [('inline_bool', False),('inline_len', False),('inline_eq', False), ('inline_cmp', False), ('inline_getitem', False)], 'InlineCode': [('inline_bool', True),('inline_len', True),('inline_eq', True), ('inline_cmp', True), ('inline_getitem', True)], 'noOperatorFuncs': [('operator_funcs', False)], 'OperatorFuncs': [('operator_funcs', True)], 'noNumberClasses': [('number_classes', False)], 'NumberClasses': [('number_classes', True)], } def __init__(self, compiler, module_name, module_file_name, src, mod, output, dynamic=0, findFile=None, **kw): monkey_patch_broken_transformer(compiler) self.compiler = compiler self.ast = compiler.ast self.js_module_name = self.jsname("variable", module_name) if module_name: self.module_prefix = "$m." 
else: self.module_prefix = "" self.module_name = module_name src = src.replace("\r\n", "\n") src = src.replace("\n\r", "\n") src = src.replace("\r", "\n") self.src = src.split("\n") self.output = output self.dynamic = dynamic self.findFile = findFile self.set_compile_options(kw) # compile options self.future_division = False self.imported_modules = [] self.imported_js = [] self.is_class_definition = False self.local_prefix = None self.track_lines = {} self.stacksize_depth = 0 self.option_stack = [] self.lookup_stack = [{}] self.indent_level = 0 self.__unique_ids__ = {} self.try_depth = -1 self.is_generator = False self.generator_states = [] self.state_max_depth = len(self.generator_states) self.constant_int = {} self.constant_long = {} self.top_level = True PYJSLIB_BUILTIN_MAPPING['__file__'] = "'%s'" % module_file_name self.w( self.spacing() + "/* start module: %s */" % module_name) if not '.' in module_name: #if module_name != self.jsname(module_name): # raise TranslationError( # "reserved word used for top-level module %r" % module_name, # mod, self.module_name) if self.js_module_name in ['pyjslib', 'sys']: self.w( self.spacing() + 'var %s;' % self.js_module_name) self.parent_module_name = None else: self.parent_module_name = '.'.join(module_name.split('.')[:-1]) if module_file_name.endswith('__init__.py'): self.import_context = "'%s'" % module_name self.relative_import_context = module_name elif self.parent_module_name: self.import_context = "'%s'" % self.parent_module_name self.relative_import_context = self.parent_module_name else: self.import_context = "null" self.relative_import_context = None self.w( self.indent() + "$pyjs.loaded_modules['%s'] = function (__mod_name__) {" % module_name) self.w( self.spacing() + "if($pyjs.loaded_modules['%s'].__was_initialized__) return $pyjs.loaded_modules['%s'];"% (module_name, module_name)) if self.parent_module_name: self.w( self.spacing() + "if(typeof $pyjs.loaded_modules['%s'] == 'undefined' || !$pyjs.loaded_modules['%s'].__was_initialized__) @{{___import___}}('%s', null);"% (self.parent_module_name, self.parent_module_name, self.parent_module_name)) parts = self.js_module_name.split('.') if len(parts) > 1: self.w( self.spacing() + 'var %s = $pyjs.loaded_modules["%s"];' % (parts[0], module_name.split('.')[0])) if self.js_module_name in ['pyjslib', 'sys']: self.w( self.spacing() + 'var %s = %s = $pyjs.loaded_modules["%s"];' % (self.module_prefix[:-1], self.js_module_name, module_name,)) else: self.w( self.spacing() + 'var %s = $pyjs.loaded_modules["%s"];' % (self.module_prefix[:-1], module_name,)) self.w( self.spacing() + self.module_prefix + '__repr__ = function() { return "<module: %s>"; };' % (module_name)) self.w( self.spacing() + self.module_prefix + "__was_initialized__ = true;") self.w( self.spacing() + "if ((__mod_name__ === null) || (typeof __mod_name__ == 'undefined')) __mod_name__ = '%s';" % (module_name)) lhs = self.scopeName('__name__', 0, False) self.w( self.spacing() + "%s = __mod_name__;" % (lhs)) if self.source_tracking: self.w( self.spacing() + "%s__track_lines__ = new Array();" % self.module_prefix) name = module_name.split(".") if len(name) > 1: jsname = self.jsname('variable', name[-1]) self.w( self.spacing() + "$pyjs.loaded_modules['%s']['%s'] = $pyjs.loaded_modules['%s'];" % ( '.'.join(name[:-1]), jsname, module_name, )) if self.attribute_checking and not module_name in ['sys', 'pyjslib']: attribute_checking = True self.w( self.indent() + 'try {') else: attribute_checking = False save_output = self.output self.output = 
StringIO() mod.lineno = 1 self.track_lineno(mod, True) for child in mod.node: self.has_js_return = False self.has_yield = False self.is_generator = False self.track_lineno(child) assert self.top_level if isinstance(child, self.ast.Function): self._function(child, None) elif isinstance(child, self.ast.Class): self._class(child) elif isinstance(child, self.ast.Import): self._import(child, None, True) elif isinstance(child, self.ast.From): self._from(child, None, True) elif isinstance(child, self.ast.Discard): self._discard(child, None) elif isinstance(child, self.ast.Assign): self._assign(child, None) elif isinstance(child, self.ast.AugAssign): self._augassign(child, None) elif isinstance(child, self.ast.If): self._if(child, None) elif isinstance(child, self.ast.For): self._for(child, None) elif isinstance(child, self.ast.While): self._while(child, None) elif isinstance(child, self.ast.Subscript): self._subscript_stmt(child, None) elif isinstance(child, self.ast.Global): self._global(child, None) elif isinstance(child, self.ast.Printnl): self._print(child, None) elif isinstance(child, self.ast.Print): self._print(child, None) elif isinstance(child, self.ast.TryExcept): self._tryExcept(child, None) elif isinstance(child, self.ast.TryFinally): self._tryFinally(child, None) elif isinstance(child, self.ast.Raise): self._raise(child, None) elif isinstance(child, self.ast.Stmt): self._stmt(child, None, True) elif isinstance(child, self.ast.AssAttr): self._assattr(child, None) elif isinstance(child, self.ast.AssName): self._assname(child, None) elif isinstance(child, self.ast.AssTuple): for node in child.nodes: self._stmt(node, None) elif isinstance(child, self.ast.Slice): self.w( self.spacing() + self._slice(child, None)) else: raise TranslationError( "unsupported type (in __init__)", child, self.module_name) captured_output = self.output.getvalue() self.output = save_output if self.source_tracking and self.store_source: for l in self.track_lines.keys(): self.w( self.spacing() + '''%s__track_lines__[%d] = "%s";''' % (self.module_prefix, l, self.track_lines[l].replace('"', '\"')), translate=False) self.w( self.local_js_vars_decl([])) if captured_output.find("@CONSTANT_DECLARATION@") >= 0: captured_output = captured_output.replace("@CONSTANT_DECLARATION@", self.constant_decl()) else: self.w( self.constant_decl()) if captured_output.find("@ATTRIB_REMAP_DECLARATION@") >= 0: captured_output = captured_output.replace("@ATTRIB_REMAP_DECLARATION@", self.attrib_remap_decl()) self.w( captured_output, False) if attribute_checking: self.w( self.dedent() + "} catch ($pyjs_attr_err) {throw @{{_errorMapping}}($pyjs_attr_err);};") self.w( self.spacing() + "return this;") self.w( self.dedent() + "}; /* end %s */" % module_name) self.w( "\n") self.w( self.spacing() + "/* end module: %s */" % module_name) self.w( "\n") # print out the deps and check for wrong imports if self.imported_modules: self.w( '/*') self.w( 'PYJS_DEPS: %s' % self.imported_modules) self.w( '*/') # print out the imported js if self.imported_js: self.w( '/*') self.w( 'PYJS_JS: %s' % repr(self.imported_js)) self.w( '*/') def set_compile_options(self, opts): opts = dict(all_compile_options, **opts) for opt, value in opts.iteritems(): if opt in all_compile_options: setattr(self, opt, value) else: raise Exception("Translator got an unknown option %s" % opt) self.ignore_debug = False self.inline_bool = self.inline_code self.inline_len = self.inline_code self.inline_eq = self.inline_code self.inline_cmp = self.inline_code self.inline_getitem = 
self.inline_code if self.number_classes: self.operator_funcs = True def w(self, txt, newline=True, output=None, translate=True): if translate and txt: txt = self.translate_escaped_names(txt, None) # TODO: current_klss output = output or self.output assert(isinstance(newline, bool)) if newline: if txt is None: print >> self.output return print >> self.output, txt else: print >> self.output, txt, def uniqid(self, prefix = ""): if not self.__unique_ids__.has_key(prefix): self.__unique_ids__[prefix] = 0 self.__unique_ids__[prefix] += 1 return "%s%d" % (prefix, self.__unique_ids__[prefix]) def spacing(self): return "\t" * self.indent_level def indent(self): spacing = self.spacing() self.indent_level += 1 return spacing def dedent(self): if self.indent_level == 0: raise TranslationError("Dedent error", None, self.module_name) self.indent_level -= 1 return self.spacing() def push_options(self): self.option_stack.append((\ self.debug, self.print_statements, self.function_argument_checking, self.attribute_checking, self.getattr_support, self.bound_methods, self.descriptors, self.source_tracking, self.line_tracking, self.store_source, self.inline_bool, self.inline_eq, self.inline_len, self.inline_cmp, self.inline_getitem, self.operator_funcs, self.number_classes, )) def pop_options(self): (\ self.debug, self.print_statements, self.function_argument_checking, self.attribute_checking, self.getattr_support, self.bound_methods, self.descriptors, self.source_tracking, self.line_tracking, self.store_source, self.inline_bool, self.inline_eq, self.inline_len, self.inline_cmp, self.inline_getitem, self.operator_funcs, self.number_classes, ) = self.option_stack.pop() def parse_decorators(self, node, funcname, current_class = None, is_method = False, bind_type = None): if node.decorators is None: return False, False, '%s' self.push_lookup() self.add_lookup('variable', '%s', '%s') code = '%s' staticmethod = False classmethod = False lineno=node.lineno if is_method: bind_type = bind_type or "bound" def add_callfunc(code, d, generic=True): tnode = self.ast.CallFunc(d, [self.ast.Name('%s')], star_args=None, dstar_args=None, lineno=lineno) code = code % self._callfunc_code(tnode, None) if is_method and (bind_type == "bound") and generic: try: bind_type_num = BIND_TYPES_NUMERIC[bind_type] except KeyError: raise TranslationError("Unknown bind type: %s" % bind_type, node) code = "$pyjs__decorated_method('%(method_name)s', %(code)s, %(bind_type)s)" % \ { "method_name": node.name, "code": code, "bind_type": bind_type_num } return code for d in node.decorators: if isinstance(d, self.ast.Getattr): if isinstance(d.expr, self.ast.Name): if d.expr.name == 'compiler': raise TranslationError( "The @compiler decorator is deprecated. 
Use from __pyjamas__ import setCompilerOptions", node, self.module_name) if d.attrname in ("setter", "getter", "deleter"): code = add_callfunc(code, d, generic=False) else: code = add_callfunc(code, d) else: code = add_callfunc(code, d) elif isinstance(d, self.ast.Name): if d.name == 'staticmethod': staticmethod = True elif d.name == 'classmethod': classmethod = True elif d.name == 'property': code = add_callfunc(code, d, generic=False) else: code = add_callfunc(code, d) else: raise TranslationError( "Unsupported decorator '%s'" % d, node, self.module_name) self.pop_lookup() if code != '%s': code = code % "@{{staticmethod}}(%s)" if staticmethod: code = "@{{staticmethod}}(%s)" % code return (staticmethod, classmethod, code) # Join an list into a variable with optional attributes def attrib_join(self, splitted): if not isinstance(splitted, list): raise TranslationError("Invalid splitted attr '%s'" % splitted) attr = [] if splitted[0][0] in ["'", '"']: attr.append(splitted[0][1:-1]) else: attr.append(splitted[0]) for word in splitted[1:]: if word[0] in ["'", '"']: word = word[1:-1] if word in pyjs_attrib_remap: attr.append("'%s'" % pyjs_attrib_remap[word]) elif word.find('(') >= 0: print 'attrib_join:', splitted, attr, word attr.append(word) else: attr.append("'%s'" % word) if len(attr) == 1: return attr[0] return "%s%s" % (attr[0], ('[' + "][".join(attr[1:]) + ']')) def vars_remap(self, word): if word in pyjs_vars_remap: return pyjs_vars_remap[word] return word # Map a word to a valid attribute def attrib_remap(self, word): attr = [] words = word.split('.') if len(words) == 1: if word in pyjs_attrib_remap: return pyjs_attrib_remap[word] return word raise RuntimeError("attrib_remap %s" % words) def push_lookup(self, scope = None): if scope is None: scope = {} self.lookup_stack.append(scope) def pop_lookup(self): return self.lookup_stack.pop() def jsname(self, name_type, jsname): words = jsname.split('.') if name_type != 'builtin': words[0] = self.vars_remap(words[0]) if len(words) == 0: return words[0] return self.attrib_join(words) def add_lookup(self, name_type, pyname, jsname, depth = -1): jsname = self.jsname(name_type, jsname) if self.local_prefix is not None: if jsname.find(self.local_prefix) != 0: jsname = self.jsname(name_type, "%s.%s" % (self.local_prefix, jsname)) if self.lookup_stack[depth].has_key(pyname): name_type = self.lookup_stack[depth][pyname][0] if self.module_name != 'pyjslib' or pyname != 'int': self.lookup_stack[depth][pyname] = (name_type, pyname, jsname) return jsname def lookup(self, name): # builtin # import # class # function # variable name_type = None pyname = name jsname = None max_depth = depth = len(self.lookup_stack) - 1 while depth >= 0: if self.lookup_stack[depth].has_key(name): name_type, pyname, jsname = self.lookup_stack[depth][name] break depth -= 1 if depth < 0: if name in PYJSLIB_BUILTIN_FUNCTIONS: name_type = 'builtin' pyname = name jsname = self.jsname("variable", "$p['%s']" % self.attrib_remap(name)) elif name in PYJSLIB_BUILTIN_CLASSES: name_type = 'builtin' pyname = name if not self.number_classes: if pyname in ['int', 'long']: name = 'float_int' jsname = self.jsname("variable", "$p['%s']" % self.attrib_remap(name)) elif PYJSLIB_BUILTIN_MAPPING.has_key(name): name_type = 'builtin' pyname = name jsname = PYJSLIB_BUILTIN_MAPPING[name] is_local = (name_type is not None) and \ (max_depth > 0) and (max_depth == depth) #if self.create_locals: # print "lookup", name_type, pyname, jsname, depth, is_local #if self.create_locals and is_local and \ # 
self.is_local_name(jsname, pyname, name_type, []): #if depth == max_depth and jsname is not None and name_type not in \ # ['builtin', '__pyjamas__', '__javascript__', 'global']: # print "name_type", name_type, jsname # jsname = "$l." + jsname return (name_type, pyname, jsname, depth, (name_type is not None) and (max_depth > 0) and (max_depth == depth)) def translate_escaped_names(self, txt, current_klass): """ escape replace names """ l = escaped_subst.split(txt) txt = l[0] for i in xrange(1, len(l)-1, 2): varname = l[i].strip() if varname.startswith('!'): txt += varname[1:] else: name_type, pyname, jsname, depth, is_local = self.lookup(varname) if name_type is None: substname = self.scopeName(varname, depth, is_local) else: substname = jsname txt += substname txt += l[i+1] return txt def scopeName(self, name, depth, local): if local: return name while depth >= 0: scopeName = self.lookup_stack[depth].get(SCOPE_KEY, None) if scopeName is not None: return scopeName + name depth -= 1 return self.modpfx() + name def attrib_remap_decl(self): s = self.spacing() lines = [] module_prefix = self.module_prefix remap = pyjs_attrib_remap.keys() remap.sort() lines.append("%(s)svar attrib_remap = %(module_prefix)sattrib_remap = %(remap)s;" % locals()) remap = pyjs_vars_remap.keys() remap.sort() lines.append("%(s)svar var_remap = %(module_prefix)svar_remap = %(remap)s;" % locals()) return "\n".join(lines) def constant_decl(self): s = self.spacing() lines = [] for name in self.constant_int: lines.append("%(s)svar $constant_int_%(name)s = new $p['int'](%(name)s);" % locals()) for name in self.constant_long: lines.append("%(s)svar $constant_long_%(name)s = new $p['long'](%(name)s);" % locals()) return "\n".join(lines) def is_local_name(self, jsname, pyname, nametype, ignore_py_vars): return ( not jsname.find('[') >= 0 and not pyname in ignore_py_vars and not nametype in ['__pyjamas__', '__javascript__', 'global'] ) def local_js_vars_decl(self, ignore_py_vars): names = [] for name in self.lookup_stack[-1].keys(): nametype = self.lookup_stack[-1][name][0] pyname = self.lookup_stack[-1][name][1] jsname = self.lookup_stack[-1][name][2] if self.is_local_name(jsname, pyname, nametype, ignore_py_vars): names.append(jsname) if len(names) > 0: return self.spacing() + "var %s;" % ','.join(names) return '' def add_imported_js(self, path, mode, location): self.imported_js.append((path, mode, location)) def add_imported_module(self, importName): names = importName.split(".") if not importName in self.imported_modules: self.imported_modules.append(importName) if importName.endswith('.js'): return # Add all parent modules _importName = '' for name in names: _importName += name if not _importName in self.imported_modules: self.imported_modules.append(_importName) _importName += '.' __inline_bool_code_str = """\ ((%(v)s=%(e)s) === null || %(v)s === false || %(v)s === 0 || %(v)s === '' ? false : (typeof %(v)s=='object'? (typeof %(v)s.__nonzero__=='function'? %(v)s.__nonzero__() : (typeof %(v)s.__len__=='function'? (%(v)s.__len__()>0 ? true : false) : true ) ) : true ) )""" __inline_bool_code_str = __inline_bool_code_str.replace(" ", "\t").replace("\n", "\n%(s)s") def inline_bool_code(self, e): if self.stupid_mode: return bracket_fn(e) if self.inline_bool: v = self.uniqid('$bool') self.add_lookup('variable', v, v) s = self.spacing() return self.__inline_bool_code_str % locals() return "$p['bool'](%(e)s)" % locals() __inline_len_code_str1 = """((%(v)s=%(e)s) === null?%(zero)s: (typeof %(v)s.__array != 'undefined' ? 
%(v)s.__array.length: (typeof %(v)s.__len__ == 'function'?%(v)s.__len__(): (typeof %(v)s.length != 'undefined'?%(v)s.length: @{{len}}(%(v)s)))))""" __inline_len_code_str1 = __inline_len_code_str1.replace(" ", "\t").replace("\n", "\n%(s)s") __inline_len_code_str2 = """((%(v)s=%(e)s) === null?%(zero)s: (typeof %(v)s.__array != 'undefined' ? new $p['int'](%(v)s.__array.length): (typeof %(v)s.__len__ == 'function'?%(v)s.__len__(): (typeof %(v)s.length != 'undefined'? new $p['int'](%(v)s.length): @{{len}}(%(v)s)))))""" __inline_len_code_str2 = __inline_len_code_str2.replace(" ", "\t").replace("\n", "\n%(s)s") def inline_len_code(self, e): if self.inline_len: v = self.uniqid('$len') self.add_lookup('variable', v, v) zero = '0' s = self.spacing() if not self.number_classes: return self.__inline_len_code_str1 % locals() self.constant_int['0'] = 1 zero = "$constant_int_0" return self.__inline_len_code_str2 % locals() return "@{{len}}(%(e)s)" % locals() __inline_eq_code_str = """((%(v1)s=%(e1)s)===(%(v2)s=%(e2)s)&&%(v1)s===null?true: (%(v1)s===null?false:(%(v2)s===null?false: ((typeof %(v1)s=='object'||typeof %(v1)s=='function')&&typeof %(v1)s.__cmp__=='function'?%(v1)s.__cmp__(%(v2)s) === 0: ((typeof %(v2)s=='object'||typeof %(v2)s=='function')&&typeof %(v2)s.__cmp__=='function'?%(v2)s.__cmp__(%(v1)s) === 0: %(v1)s==%(v2)s)))))""" __inline_eq_code_str = __inline_eq_code_str.replace(" ", "\t").replace("\n", "\n%(s)s") def inline_eq_code(self, e1, e2): if self.inline_eq and not self.number_classes: v1 = self.uniqid('$eq') v2 = self.uniqid('$eq') self.add_lookup('variable', v1, v1) self.add_lookup('variable', v2, v2) s = self.spacing() return self.__inline_eq_code_str % locals() return "@{{op_eq}}(%(e1)s, %(e2)s)" % locals() __inline_cmp_code_str = """((%(v1)s=%(e1)s)===(%(v2)s=%(e2)s)?0: (typeof %(v1)s==typeof %(v2)s && ((typeof %(v1)s == 'number')||(typeof %(v1)s == 'string')||(typeof %(v1)s == 'boolean'))? (%(v1)s == %(v2)s ? 0 : (%(v1)s < %(v2)s ? -1 : 1)): @{{cmp}}(%(v1)s, %(v2)s)))""" __inline_cmp_code_str = __inline_cmp_code_str.replace(" ", "\t").replace("\n", "\n%(s)s") def inline_cmp_code(self, e1, e2): if self.inline_cmp: v1 = self.uniqid('$cmp') v2 = self.uniqid('$cmp') self.add_lookup('variable', v1, v1) self.add_lookup('variable', v2, v2) s = self.spacing() return self.__inline_cmp_code_str % locals() return "@{{cmp}}(%(e1)s, %(e2)s)" % locals() __inline_getitem_code_str = """(typeof (%(v1)s=%(e)s).__array != 'undefined'? 
((typeof %(v1)s.__array[%(v2)s=%(i)s]) != 'undefined'?%(v1)s.__array[%(v2)s]: %(v1)s.__getitem__(%(v2)s)): %(v1)s.__getitem__(%(i)s))""" __inline_getitem_code_str = __inline_getitem_code_str.replace(" ", "\t").replace("\n", "\n%(s)s") def inline_getitem_code(self, e, i): if self.inline_getitem: v1 = self.uniqid('$') self.add_lookup('variable', v1, v1) v2 = self.uniqid('$') self.add_lookup('variable', v2, v2) s = self.spacing() return self.__inline_getitem_code_str % locals() return "%(e)s.__getitem__(%(i)s)" % locals() def md5(self, node): return md5(self.module_name + str(node.lineno) + repr(node)).hexdigest() def track_lineno(self, node, module=False): if self.source_tracking and node.lineno: if module: self.w( self.spacing() + "$pyjs.track.module='%s';" % self.module_name) if self.line_tracking: self.w( self.spacing() + "$pyjs.track.lineno=%d;" % node.lineno) #self.w( self.spacing() + "if ($pyjs.track.module!='%s') debugger;" % self.module_name) if self.store_source: self.track_lines[node.lineno] = self.get_line_trace(node) def track_call(self, call_code, lineno=None): if not self.ignore_debug and self.debug and len(call_code.strip()) > 0: dbg = self.uniqid("$pyjs_dbg_") mod = self.module_name s = self.spacing() call_code = """\ (function(){try{try{$pyjs.in_try_except += 1; %(s)sreturn %(call_code)s; }finally{$pyjs.in_try_except-=1;}}catch(%(dbg)s_err){\ if (!@{{isinstance}}(%(dbg)s_err, @{{StopIteration}}))\ {@{{_handle_exception}}(%(dbg)s_err);}\ throw %(dbg)s_err; }})()""" % locals() return call_code __generator_code_str = """\ var $generator_state = [0], $generator_exc = [null], $yield_value = null, $exc = null, $is_executing=false; var $generator = function () {}; $generator['next'] = function (noStop) { %(src1)s var $res; $yield_value = $exc = null; try { $res = $generator['$genfunc'](); $is_executing=false; if (typeof $res == 'undefined') { if (noStop === true) { $generator_state[0] = -1; return; } throw @{{StopIteration}}(); } } catch (e) { %(src2)s $is_executing=false; $generator_state[0] = -1; if (noStop === true && @{{isinstance}}(e, @{{StopIteration}})) { return; } throw e; } return $res; }; $generator['__iter__'] = function () {return $generator;}; $generator['send'] = function ($val) { %(src1)s $yield_value = $val; $exc = null; try { var $res = $generator['$genfunc'](); if (typeof $res == 'undefined') throw @{{StopIteration}}(); } catch (e) { %(src2)s $generator_state[0] = -1; $is_executing=false; throw e; } $is_executing=false; return $res; }; $generator['$$throw'] = function ($exc_type, $exc_value) { %(src1)s $yield_value = null; $exc=(typeof $exc_value == 'undefined' ? $exc_type() : (@{{isinstance}}($exc_value, $exc_type) ? 
$exc_value : $exc_type($exc_value))); try { var $res = $generator['$genfunc'](); } catch (e) { %(src2)s $generator_state[0] = -1; $is_executing=false; throw (e); } $is_executing=false; return $res; }; $generator['close'] = function () { %(src1)s $yield_value = null; $exc=@{{GeneratorExit}}; try { var $res = $generator['$genfunc'](); $is_executing=false; if (typeof $res != 'undefined') throw @{{RuntimeError}}('generator ignored GeneratorExit'); } catch (e) { %(src2)s $generator_state[0] = -1; $is_executing=false; if (@{{isinstance}}(e, @{{StopIteration}}) || @{{isinstance}}(e, @{{GeneratorExit}})) return null; throw (e); } return null; }; $generator['$genfunc'] = function () { var $yielding = false; if ($is_executing) throw @{{ValueError}}('generator already executing'); $is_executing = true; """ __generator_code_str = __generator_code_str.replace(" ", "\t").replace("\n", "\n%(s)s") def generator(self, code): if self.is_generator: s = self.spacing() if self.source_tracking: src1 = "var $pyjs__trackstack_size_%d = $pyjs.trackstack.length;" % self.stacksize_depth src2 = """\ %(s)ssys.save_exception_stack(); %(s)sif ($pyjs.trackstack.length > $pyjs__trackstack_size_%(d)d) { %(s)s\t$pyjs.trackstack = $pyjs.trackstack.slice(0,$pyjs__trackstack_size_%(d)d); %(s)s\t$pyjs.track = $pyjs.trackstack.slice(-1)[0]; %(s)s} %(s)s$pyjs.track.module='%(m)s';""" % {'s': self.spacing(), 'd': self.stacksize_depth, 'm': self.module_name} else: src1 = src2 = "" self.w( self.__generator_code_str % locals()) self.indent() self.w( code) self.w( self.spacing() + "return;") self.w( self.dedent() + "};") self.w( self.spacing() + "return $generator;") else: self.w( captured_output, False) def generator_switch_open(self): if self.is_generator: self.indent() def generator_switch_case(self, increment): if self.is_generator: if increment: self.generator_states[-1] += 1 n_states = len(self.generator_states) state = self.generator_states[-1] if self.generator_states[-1] == 0: self.dedent() self.w( self.indent() + """if (typeof $generator_state[%d] == 'undefined' || $generator_state[%d] === 0) {""" % (n_states-1, n_states-1)) self.generator_clear_state() if n_states == 1: self.generator_throw() else: if increment: self.w( self.spacing() + """$generator_state[%d]=%d;""" % (n_states-1, state)) self.w( self.dedent() + "}") self.w( self.indent() + """if ($generator_state[%d] == %d) {""" % (n_states-1, state)) def generator_switch_close(self): if self.is_generator: self.w( self.dedent() + "}") def generator_add_state(self): if self.is_generator: self.generator_states.append(0) self.state_max_depth = len(self.generator_states) def generator_del_state(self): if self.is_generator: del self.generator_states[-1] def generator_clear_state(self): if self.is_generator: n_states = len(self.generator_states) self.w( self.spacing() + """for (var $i = %d ; $i < ($generator_state.length<%d?%d:$generator_state.length); $i++) $generator_state[$i]=0;""" % (n_states-1, n_states+1, n_states+1)) def generator_reset_state(self): if self.is_generator: n_states = len(self.generator_states) self.w( self.spacing() + """$generator_state.splice(%d, $generator_state.length-%d);""" % (n_states, n_states)) def generator_throw(self): self.w( self.indent() + "if (typeof $exc != 'undefined' && $exc !== null) {") self.w( self.spacing() + "$yielding = null;") self.w( self.spacing() + "$generator_state[%d] = -1;" % (len(self.generator_states)-1,)) self.w( self.spacing() + "throw $exc;") self.w( self.dedent() + "}") def func_args(self, node, current_klass, 
function_name, bind_type, args, stararg, dstararg): try: bind_type = BIND_TYPES_NUMERIC[bind_type] except KeyError: raise TranslationError("Unknown bind type: %s" % bind_type, node) _args = [] default_pos = len(args) - len(node.defaults) for idx, arg in enumerate(args): if idx < default_pos: _args.append("['%s']" % arg) else: default_value = self.expr(node.defaults[idx-default_pos], current_klass) _args.append("""['%s', %s]""" % (arg, default_value)) args = ",".join(_args) if dstararg: args = "['%s'],%s" % (dstararg, args) else: args = "null,%s" % args if stararg: args = "'%s',%s" % (stararg, args) else: args = "null,%s" % args args = '[' + args + ']' # remove any empty tail if args.endswith(',]'): args = args[:-2] + ']' if function_name is None: self.w( "\t, %d, %s);" % (bind_type, args)) else: self.w( self.spacing() + "%s.__bind_type__ = %s;" % (function_name, bind_type)) self.w( self.spacing() + "%s.__args__ = %s;" % (function_name, args)) def _instance_method_init(self, node, arg_names, varargname, kwargname, current_klass, output=None): output = output or self.output maxargs1 = len(arg_names) - 1 maxargs2 = len(arg_names) minargs1 = maxargs1 - len(node.defaults) minargs2 = maxargs2 - len(node.defaults) if node.kwargs: maxargs1 += 1 maxargs2 += 1 maxargs1str = "%d" % maxargs1 maxargs2str = "%d" % maxargs2 if node.varargs: argcount1 = "arguments.length < %d" % minargs1 maxargs1str = "null" elif minargs1 == maxargs1: argcount1 = "arguments.length != %d" % minargs1 else: argcount1 = "(arguments.length < %d || arguments.length > %d)" % (minargs1, maxargs1) if node.varargs: argcount2 = "arguments.length < %d" % minargs2 maxargs2str = "null" elif minargs2 == maxargs2: argcount2 = "arguments.length != %d" % minargs2 else: argcount2 = "(arguments.length < %d || arguments.length > %d)" % (minargs2, maxargs2) s = self.spacing() if self.create_locals: args = ["this", "arguments"] args.append("%d" % len(node.defaults)) args.append(bool(node.varargs) and "true" or "false") args.append(bool(node.kwargs) and "true" or "false") args = ", ".join(args) self.w(s + "var $l = $pyjs_instance_method_get(%s);" % args) args = [] if node.varargs: args.append("%(varargname)s = $l.%(varargname)s" % locals()) if node.kwargs: args.append("%(kwargname)s = $l.%(kwargname)s" % locals()) args = ", ".join(args) if args: self.w( s + "var %s;" % args) if arg_names: an = arg_names[0] self.w( s + "var %s = $l.%s;" % (an, an)) args = [] for an in arg_names[1:]: args.append("%s = $l.%s" % (an, an)) if args: args = ", ".join(args) self.w( s + "%s;" % args) if False: #arg_names: an = arg_names[0] self.w( s + "if (this.__is_instance__ === true) {") self.w( s + "\tvar %s = this;" % an) self.w( s + "} else {") self.w( s + "\t%s = $l.%s;" % (an, an)) self.w( s + "}") for an in arg_names[1:]: an = (an, an, an) self.w(s + "%s = $pyjsdf(%s, $l.%s);" % an) return lpself = "var " lp = "" self.w(self.indent() + """\ if (this.__is_instance__ === true) {\ """, output=output) if arg_names: self.w( self.spacing() + """\ %s%s = this;\ """ % (lpself, arg_names[0]), output=output) if node.varargs: self._varargs_handler(node, varargname, maxargs1, lp) if node.kwargs: self.w( self.spacing() + """\ %s%s = arguments.length >= %d ? 
arguments[arguments.length-1] : arguments[arguments.length];\ """ % (lpself, kwargname, maxargs1), output=output) s = self.spacing() self.w( """\ %(s)sif (typeof %(lp)s%(kwargname)s != 'object' || %(lp)s%(kwargname)s.__name__ != 'dict' || typeof %(lp)s%(kwargname)s.$pyjs_is_kwarg == 'undefined') {\ """ % locals(), output=output) if node.varargs: self.w( """\ %(s)s\tif (typeof %(lp)s%(kwargname)s != 'undefined') %(lp)s%(varargname)s.__array.push(%(lp)s%(kwargname)s);\ """ % locals(), output=output) self.w( """\ %(s)s\t%(lpself)s%(kwargname)s = arguments[arguments.length+1]; %(s)s} else { %(s)s\tdelete %(lp)s%(kwargname)s['$pyjs_is_kwarg']; %(s)s}\ """ % locals(), output=output) if self.function_argument_checking: self.w( self.spacing() + """\ if ($pyjs.options.arg_count && %s) $pyjs__exception_func_param(arguments.callee.__name__, %d, %s, arguments.length+1);\ """ % (argcount1, minargs2, maxargs2str), output=output) self.w( self.dedent() + """\ } else {\ """, output=output) self.indent() if arg_names: self.w( self.spacing() + """\ %s%s = arguments[0];\ """ % (lpself, arg_names[0]), output=output) arg_idx = 0 for arg_name in arg_names[1:]: arg_idx += 1 self.w( self.spacing() + """\ %s%s = arguments[%d];\ """ % (lp, arg_name, arg_idx), output=output) if node.varargs: self._varargs_handler(node, varargname, maxargs2, lp) if node.kwargs: self.w( self.spacing() + """\ %s%s = arguments.length >= %d ? arguments[arguments.length-1] : arguments[arguments.length];\ """ % (lpself, kwargname, maxargs2), output=output) s = self.spacing() self.w( """\ %(s)sif (typeof %(lp)s%(kwargname)s != 'object' || %(lp)s%(kwargname)s.__name__ != 'dict' || typeof %(lp)s%(kwargname)s.$pyjs_is_kwarg == 'undefined') {\ """ % locals(), output=output) if node.varargs: self.w( """\ %(s)s\tif (typeof %(lp)s%(kwargname)s != 'undefined') %(lp)s%(varargname)s.__array.push(%(lp)s%(kwargname)s);\ """ % locals(), output=output) self.w( """\ %(s)s\t%(lp)s%(kwargname)s = arguments[arguments.length+1]; %(s)s} else { %(s)s\tdelete %(lp)s%(kwargname)s['$pyjs_is_kwarg']; %(s)s}\ """ % locals(), output=output) if self.function_argument_checking: self.w( """\ %sif ($pyjs.options.arg_is_instance && self.__is_instance__ !== true) $pyjs__exception_func_instance_expected(arguments.callee.__name__, arguments.callee.__class__.__name__, self); %sif ($pyjs.options.arg_count && %s) $pyjs__exception_func_param(arguments.callee.__name__, %d, %s, arguments.length);\ """ % (self.spacing(), self.spacing(), argcount2, minargs2, maxargs2str), output=output) self.w( self.dedent() + "}", output=output) if arg_names and self.function_argument_checking: self.w( """\ %(s)sif ($pyjs.options.arg_instance_type) { %(s)s\tif (%(self)s.prototype.__md5__ !== '%(__md5__)s') { %(s)s\t\tif (!@{{_isinstance}}(%(self)s, arguments['callee']['__class__'])) { %(s)s\t\t\t$pyjs__exception_func_instance_expected(arguments['callee']['__name__'], arguments['callee']['__class__']['__name__'], %(self)s); %(s)s\t\t} %(s)s\t} %(s)s}\ """ % {'s': self.spacing(), 'self': arg_names[0], '__md5__': current_klass.__md5__}, output=output) def _static_method_init(self, node, arg_names, varargname, kwargname, current_klass, output=None): output = output or self.output maxargs = len(arg_names) minargs = maxargs - len(node.defaults) maxargsstr = "%d" % maxargs s = self.spacing() if False: # self.create_locals: lp = "$l." 
lpdec = "" self.w(s + "var $l = {};") arg_idx = 0 for arg_name in arg_names: self.w( s + """%s%s = arguments[%d];""" % \ (lp, arg_name, arg_idx), output=output) arg_idx += 1 else: lpdec = "var " lp = "" if node.kwargs: maxargs += 1 if node.varargs: argcount = "arguments.length < %d" % minargs maxargsstr = "null" elif minargs == maxargs: argcount = "arguments.length != %d" % minargs else: argcount = "(arguments.length < %d || arguments.length > %d)" % (minargs, maxargs) if self.function_argument_checking: self.w( self.spacing() + """\ if ($pyjs.options.arg_count && %s) $pyjs__exception_func_param(arguments.callee.__name__, %d, %s, arguments.length);\ """ % (argcount, minargs, maxargsstr), output=output) if node.varargs: self._varargs_handler(node, varargname, maxargs, lp) if node.kwargs: self.w( self.spacing() + """\ %s%s = arguments.length >= %d ? arguments[arguments.length-1] : arguments[arguments.length];\ """ % (lpdec, kwargname, maxargs), output=output) s = self.spacing() self.w( """\ %(s)sif (typeof %(lp)s%(kwargname)s != 'object' || %(lp)s%(kwargname)s.__name__ != 'dict' || typeof %(lp)s%(kwargname)s.$pyjs_is_kwarg == 'undefined') {\ """ % locals(), output=output) if node.varargs: self.w( """\ %(s)s\tif (typeof %(lp)s%(kwargname)s != 'undefined') %(varargname)s.__array.push(%(lp)s%(kwargname)s);\ """ % locals(), output=output) self.w( """\ %(s)s\t%(lp)s%(kwargname)s = arguments[arguments.length+1]; %(s)s} else { %(s)s\tdelete %(lp)s%(kwargname)s['$pyjs_is_kwarg']; %(s)s}\ """ % locals(), output=output) def _class_method_init(self, node, arg_names, varargname, kwargname, current_klass, output=None): output = output or self.output maxargs = max(0, len(arg_names) -1) minargs = max(0, maxargs - len(node.defaults)) maxargsstr = "%d" % (maxargs+1) if node.kwargs: maxargs += 1 if node.varargs: argcount = "arguments.length < %d" % minargs maxargsstr = "null" elif minargs == maxargs: argcount = "arguments.length != %d" % minargs maxargsstr = "%d" % (maxargs) else: argcount = "(arguments.length < %d || arguments.length > %d)" % (minargs, maxargs) if self.function_argument_checking: self.w( """\ if ($pyjs.options.arg_is_instance && this.__is_instance__ !== true && this.__is_instance__ !== false) $pyjs__exception_func_class_expected(arguments.callee.__name__, arguments.callee.__class__.__name__); if ($pyjs.options.arg_count && %s) $pyjs__exception_func_param(arguments.callee.__name__, %d, %s, arguments.length);\ """ % (argcount, minargs+1, maxargsstr), output=output) self.w( """\ var %s = this.prototype;\ """ % (arg_names[0],), output=output) if node.varargs: self._varargs_handler(node, varargname, maxargs, "") if node.kwargs: self.w( self.spacing() + """\ var %s = arguments.length >= %d ? arguments[arguments.length-1] : arguments[arguments.length];\ """ % (kwargname, maxargs), output=output) s = self.spacing() self.w( """\ %(s)sif (typeof %(kwargname)s != 'object' || %(kwargname)s.__name__ != 'dict' || typeof %(kwargname)s.$pyjs_is_kwarg == 'undefined') {\ """ % locals(), output=output) if node.varargs: self.w( """\ %(s)s\tif (typeof %(kwargname)s != 'undefined') %(varargname)s.__array.push(%(kwargname)s);\ """ % locals(), output=output) self.w( """\ %(s)s\t%(kwargname)s = arguments[arguments.length+1]; %(s)s}\ """ % locals(), output=output) def _default_args_handler(self, node, arg_names, current_klass, kwargname, lp, output=None): output = output or self.output if node.kwargs: # This is necessary when **kwargs in function definition # and the call didn't pass the pyjs_kwargs_call(). 
# See libtest testKwArgsInherit # This is not completely safe: if the last element in arguments # is an dict and the corresponding argument shoud be a dict and # the kwargs should be empty, the kwargs gets incorrectly the # dict and the argument becomes undefined. # E.g. # def fn(a = {}, **kwargs): pass # fn({'a':1}) -> a gets undefined and kwargs gets {'a':1} revargs = arg_names[0:] revargs.reverse() self.w( """\ %(s)sif (typeof %(lp)s%(k)s == 'undefined') { %(s)s\t%(lp)s%(k)s = @{{__empty_dict}}();\ """ % {'lp': lp, 's': self.spacing(), 'k': kwargname}, output=output) for v in revargs: self.w( """\ %(s)s\tif (typeof %(lp)s%(v)s != 'undefined') { %(s)s\t\tif (%(lp)s%(v)s !== null && typeof %(lp)s%(v)s['$pyjs_is_kwarg'] != 'undefined') { %(s)s\t\t\t%(lp)s%(k)s = %(lp)s%(v)s; %(s)s\t\t\t%(lp)s%(v)s = arguments[%(a)d]; %(s)s\t\t} %(s)s\t} else\ """ % {'lp': lp, 's': self.spacing(), 'v': v, 'k': kwargname, 'a': len(arg_names)}, False, output=output) self.w( """\ { %(s)s\t} %(s)s}\ """ % {'s': self.spacing()}, output=output) if len(node.defaults): default_pos = len(arg_names) - len(node.defaults) for default_node in node.defaults: #default_value = self.expr(default_node, current_klass) default_name = arg_names[default_pos] default_pos += 1 #self.w( self.spacing() + "if (typeof %s == 'undefined') %s=%s;" % (default_name, default_name, default_value)) self.w( self.spacing() + "if (typeof %s%s == 'undefined') %s%s=arguments.callee.__args__[%d][1];" % (lp, default_name, lp, default_name, default_pos+1), output=output) def _varargs_handler(self, node, varargname, start, lp): if node.kwargs: end = "arguments.length-1" start -= 1 else: end = "arguments.length" if not lp: lp = 'var ' self.w( """\ %(s)s%(lp)s%(v)s = $p['tuple']($pyjs_array_slice.call(arguments,%(b)d,%(e)s)); """ % {'s': self.spacing(), 'v': varargname, 'b': start, 'e': end, 'lp': lp}) def _kwargs_parser(self, node, function_name, arg_names, current_klass, method_ = False): default_pos = len(arg_names) - len(node.defaults) if not method_: self.w( self.indent() + function_name+'.parse_kwargs = function (', ", ".join(["__kwargs"]+arg_names) + " ) {") else: self.w( self.indent() + ", function (", ", ".join(["__kwargs"]+arg_names) + " ) {") self.w( self.spacing() + "var __r = [];") self.w( self.spacing() + "var $pyjs__va_arg_start = %d;" % (len(arg_names)+1)) if len(arg_names) > 0: self.w( """\ %(s)sif (typeof %(arg_name)s != 'undefined' && this.__is_instance__ === false && %(arg_name)s.__is_instance__ === true) { %(s)s\t__r.push(%(arg_name)s); %(s)s\t$pyjs__va_arg_start++;""" % {'s': self.spacing(), 'arg_name': arg_names[0]}) idx = 1 for arg_name in arg_names: idx += 1 self.w( """\ %(s)s\t%(arg_name)s = arguments[%(idx)d];\ """ % {'s': self.spacing(), 'arg_name': arg_name, 'idx': idx}) self.w( self.spacing() + "}") for arg_name in arg_names: if self.function_argument_checking: self.w( """\ %(s)sif (typeof %(arg_name)s == 'undefined') { %(s)s\t%(arg_name)s=__kwargs.%(arg_name)s; %(s)s\tdelete __kwargs.%(arg_name)s; %(s)s} else if ($pyjs.options.arg_kwarg_multiple_values && typeof __kwargs.%(arg_name)s != 'undefined') { %(s)s\t$pyjs__exception_func_multiple_values('%(function_name)s', '%(arg_name)s'); %(s)s}\ """ % {'s': self.spacing(), 'arg_name': arg_name, 'function_name': function_name}) else: self.w( self.indent() + "if (typeof %s == 'undefined') {"%(arg_name)) self.w( self.spacing() + "%s=__kwargs.%s;"% (arg_name, arg_name)) self.w( self.dedent() + "}") self.w( self.spacing() + "__r.push(%s);" % arg_name) if 
self.function_argument_checking and not node.kwargs: self.w( """\ %(s)sif ($pyjs.options.arg_kwarg_unexpected_keyword) { %(s)s\tfor (var i in __kwargs) { %(s)s\t\t$pyjs__exception_func_unexpected_keyword('%(function_name)s', i); %(s)s\t} %(s)s}\ """ % {'s': self.spacing(), 'function_name': function_name}) # Always add all remaining arguments. Needed for argument checking _and_ if self != this; self.w( """\ %(s)sfor (var $pyjs__va_arg = $pyjs__va_arg_start;$pyjs__va_arg < arguments.length;$pyjs__va_arg++) { %(s)s\t__r.push(arguments[$pyjs__va_arg]); %(s)s} """ % {'s': self.spacing()}) if node.kwargs: self.w( self.spacing() + "__r.push($p['dict'](__kwargs));") self.w( self.spacing() + "return __r;") if not method_: self.w( self.dedent() + "};") else: self.w( self.dedent() + "});") def _import(self, node, current_klass, root_level = False): # XXX: hack for in-function checking, we should have another # object to check our scope self._doImport(node.names, current_klass, root_level, True) def _doImport(self, names, current_klass, root_level, assignBase, absPath=False, all=False): if root_level: modtype = 'root-module' else: modtype = 'module' for importName, importAs in names: if importName == '__pyjamas__': continue if importName.endswith(".js"): self.add_imported_module(importName) continue # "searchList" contains a list of possible module names : # We create the list at compile time to save runtime. searchList = [] context = self.module_name if '.' in context: # our context lives in a package so it is possible to have a # relative import package = context.rsplit('.', 1)[0] relName = package + '.' + importName searchList.append(relName) if '.' in importName: searchList.append(relName.rsplit('.', 1)[0]) # the absolute path searchList.append(importName) if '.' 
in importName: searchList.append(importName.rsplit('.', 1)[0]) mod = self.lookup(importName) package_mod = self.lookup(importName.split('.', 1)[0]) if self.source_tracking: self.w( self.spacing() + "$pyjs.track={module:$pyjs.track.module,lineno:$pyjs.track.lineno};$pyjs.trackstack.push($pyjs.track);") import_stmt = None if ( mod[0] != 'root-module' or (assignBase and not package_mod[0] in ['root-module', 'module']) ): # the import statement if absPath: context = 'null' else: context = self.import_context if not all: import_stmt = "@{{___import___}}('%s', %s" % ( importName, context, ) else: import_stmt = "@{{__import_all__}}('%s', %s, %s" %( importName, context, self.modpfx()[:-1], ) if not assignBase: self.w( self.spacing() + import_stmt + ', null, false);') self._lhsFromName(importName, current_klass, modtype) self.add_imported_module(importName) if assignBase: # get the name in scope package_name = importName.split('.')[0] if importAs: ass_name = importAs if not import_stmt is None: import_stmt += ', null, false' else: ass_name = package_name lhs = self._lhsFromName(ass_name, current_klass, modtype) if importAs: mod_name = importName else: mod_name = ass_name if import_stmt is None: #stmt = "%s = $pyjs.__modules__['%s'];"% (lhs, "']['".join(mod_name.split('.'))) parent_mod_name = mod_name.split('.') if len(parent_mod_name) == 1: stmt = "%s = $pyjs.loaded_modules['%s'];"% (lhs, mod_name) else: mod_name = parent_mod_name[-1] parent_mod_name = '.'.join(parent_mod_name[:-1]) stmt = "%s = $pyjs.loaded_modules['%s']['%s'];"% (lhs, parent_mod_name, mod_name) else: stmt = "%s = %s);"% (lhs, import_stmt) self.w( self.spacing() + stmt) if self.source_tracking: self.w( self.spacing() + "$pyjs.trackstack.pop();$pyjs.track=$pyjs.trackstack.pop();$pyjs.trackstack.push($pyjs.track);") def _from(self, node, current_klass, root_level = False): if node.modname == '__pyjamas__': # special module to help make pyjamas modules loadable in # the python interpreter for name in node.names: ass_name = name[1] or name[0] try: jsname = getattr(__pyjamas__, name[0]) if callable(jsname): self.add_lookup("__pyjamas__", ass_name, name[0]) else: self.add_lookup("__pyjamas__", ass_name, jsname) except AttributeError, e: #raise TranslationError("Unknown __pyjamas__ import: %s" % name, node) pass return if node.modname == '__javascript__': for name in node.names: ass_name = name[1] or name[0] self.add_lookup("__javascript__", ass_name, name[0]) return if node.modname == '__future__': for name in node.names: future = getattr(__future__, name[0], None) if callable(future): future(self) else: # Ignoring from __future__ import name[0] pass return # XXX: hack for in-function checking, we should have another # object to check our scope absPath = False modname = node.modname if hasattr(node, 'level') and node.level > 0: absPath = True modname = self.relative_import_context.split('.') level = node.level - 1 if len(modname) <= level: raise TranslationError( "Attempted relative import beyond toplevel package", node, self.module_name) if level: modname = '.'.join(modname[:-node.level]) else: modname = self.relative_import_context if node.modname: modname += '.' + node.modname for name in node.names: if name[0] == "*": self._doImport(((modname, name[0]),), current_klass, root_level, False, absPath, True) continue sub = modname + '.' 
+ name[0] ass_name = name[1] or name[0] self._doImport(((sub, ass_name),), current_klass, root_level, True, absPath) def _function(self, node, current_klass, force_local=False): if self.is_class_definition: return self._method(node, current_klass) save_top_level = self.top_level self.push_options() save_has_js_return = self.has_js_return self.has_js_return = False save_has_yield = self.has_yield self.has_yield = False save_is_generator = self.is_generator self.is_generator = False save_generator_states = self.generator_states self.generator_states = [0] self.state_max_depth = len(self.generator_states) if not save_top_level or force_local: function_name = node.name else: function_name = self.modpfx() + node.name function_name = self.add_lookup('function', node.name, function_name) staticmethod, classmethod, decorator_code = self.parse_decorators(node, node.name, current_klass) if staticmethod or classmethod: raise TranslationError( "Decorators staticmethod and classmethod not implemented for functions", v.node, self.module_name) self.push_lookup() arg_names = [] for arg in node.argnames: if isinstance(arg, tuple): for a in arg: arg_names.append(self.add_lookup('variable', a, a)) else: arg_names.append(self.add_lookup('variable', arg, arg)) normal_arg_names = list(arg_names) if node.kwargs: kwargname = normal_arg_names.pop() else: kwargname = None if node.varargs: varargname = normal_arg_names.pop() else: varargname = None declared_arg_names = list(normal_arg_names) #if node.kwargs: declared_arg_names.append(kwargname) function_args = "(" + ", ".join(declared_arg_names) + ")" self.w( self.indent() + "%s = function%s {" % (function_name, function_args)) self._static_method_init(node, declared_arg_names, varargname, kwargname, None) #lp = self.create_locals and "$l." 
or "" self._default_args_handler(node, declared_arg_names, None, kwargname, "") local_arg_names = normal_arg_names + declared_arg_names if node.kwargs: local_arg_names.append(kwargname) if node.varargs: local_arg_names.append(varargname) self.top_level = False save_output = self.output self.output = StringIO() if self.source_tracking: self.w( self.spacing() + "$pyjs.track={module:'%s',lineno:%d};$pyjs.trackstack.push($pyjs.track);" % (self.module_name, node.lineno)) self.track_lineno(node, True) for child in node.code: self._stmt(child, None) if not self.has_yield and self.source_tracking and self.has_js_return: self.source_tracking = False self.output = StringIO() for child in node.code: self._stmt(child, None) elif self.has_yield: if self.has_js_return: self.source_tracking = False self.is_generator = True self.generator_states = [0] self.output = StringIO() self.indent() if self.source_tracking: self.w( self.spacing() + "$pyjs.track={module:'%s',lineno:%d};$pyjs.trackstack.push($pyjs.track);" % (self.module_name, node.lineno)) self.track_lineno(node, True) self.generator_switch_open() self.generator_switch_case(increment=False) for child in node.code: self._stmt(child, None) self.generator_switch_case(increment=True) self.generator_switch_close() self.dedent() captured_output = self.output.getvalue() self.output = save_output self.w( self.local_js_vars_decl(local_arg_names)) if self.is_generator: self.generator(captured_output) else: self.w( captured_output, False) # we need to return null always, so it is not undefined if node.code.nodes: lastStmt = node.code.nodes[-1] else: lastStmt = None if not isinstance(lastStmt, self.ast.Return): if self.source_tracking: self.w( self.spacing() + "$pyjs.trackstack.pop();$pyjs.track=$pyjs.trackstack.pop();$pyjs.trackstack.push($pyjs.track);") # FIXME: check why not on on self._isNativeFunc(lastStmt) if not self._isNativeFunc(lastStmt): self.w( self.spacing() + "return null;") self.w( self.dedent() + "};") self.w( self.spacing() + "%s.__name__ = '%s';\n" % (function_name, node.name)) self.pop_lookup() self.func_args(node, current_klass, function_name, 'func', declared_arg_names, varargname, kwargname) if decorator_code: decorator_code = decorator_code % function_name if function_name != decorator_code: self.w( self.spacing() + "%s = %s;" % (function_name, decorator_code)) self.generator_states = save_generator_states self.state_max_depth = len(self.generator_states) self.is_generator = save_is_generator self.has_yield = save_has_yield self.has_js_return = save_has_js_return self.pop_options() self.top_level = save_top_level def _assert(self, node, current_klass): expr = self.expr(node.test, current_klass) if node.fail: fail = self.expr(node.fail, current_klass) else: fail = '' self.w( self.spacing() + "if (!( " + expr + " )) {") self.w( self.spacing() + " throw @{{AssertionError}}(%s);" % fail) self.w( self.spacing() + " }") def _return(self, node, current_klass): expr = self.expr(node.value, current_klass) # in python a function call always returns None, so we do it # here too self.track_lineno(node) if self.is_generator: if isinstance(node.value, self.ast.Const): if node.value.value is None: if self.source_tracking: self.w( self.spacing() + "$pyjs.trackstack.pop();$pyjs.track=$pyjs.trackstack.pop();$pyjs.trackstack.push($pyjs.track);") self.w( self.spacing() + "return;") return raise TranslationError( "'return' with argument inside generator", node, self.module_name) elif self.source_tracking: self.w( self.spacing() + "var $pyjs__ret = " + expr + 
";") self.w( self.spacing() + "$pyjs.trackstack.pop();$pyjs.track=$pyjs.trackstack.pop();$pyjs.trackstack.push($pyjs.track);") self.w( self.spacing() + "return $pyjs__ret;") else: self.w( self.spacing() + "return " + expr + ";") def _yield(self, node, current_klass): # http://www.python.org/doc/2.5.2/ref/yieldexpr.html self.has_yield = True expr = self.expr(node.value, current_klass) self.track_lineno(node) #self.w( self.spacing() + "$generator_state[%d] = %d;" % (len(self.generator_states)-1, self.generator_states[-1]+1) self.w( self.spacing() + "$yield_value = " + expr + ";") if self.source_tracking: self.w( self.spacing() + "$pyjs.trackstack.pop();$pyjs.track=$pyjs.trackstack.pop();$pyjs.trackstack.push($pyjs.track);") self.w( self.spacing() + "$yielding = true;") self.w( self.spacing() + "$generator_state[%d] = %d;" % (len(self.generator_states)-1, self.generator_states[-1]+1)) self.w( self.spacing() + "return $yield_value;") self.generator_switch_case(increment=True) self.generator_throw() def _yield_expr(self, node, current_klass): self._yield(node, current_klass) return '$yield_value' def _break(self, node, current_klass): self.generator_switch_case(increment=True) self.w( self.spacing() + "break;") def _continue(self, node, current_klass): self.w( self.spacing() + "continue;") def _callfunc_code(self, v, current_klass, is_statement=False, optlocal_var=False): self.ignore_debug = False method_name = None if isinstance(v.node, self.ast.Name): name_type, pyname, jsname, depth, is_local = self.lookup(v.node.name) if name_type == '__pyjamas__': try: raw_js = getattr(__pyjamas__, v.node.name) if callable(raw_js): raw_js, has_js_return = raw_js(self, v, current_klass, is_statement=is_statement) if has_js_return: self.has_js_return = True else: raw_js = self.translate_escaped_names(raw_js, current_klass) return raw_js except AttributeError, e: raise TranslationError( "Unknown __pyjamas__ function %s" % pyname, v.node, self.module_name) except TranslationError, e: raise TranslationError(e.msg, v, self.module_name) elif v.node.name == 'locals': return """$p.dict({%s})""" % (",".join(["'%s': %s" % (pyname, self.lookup_stack[-1][pyname][2]) for pyname in self.lookup_stack[-1] if self.lookup_stack[-1][pyname][0] not in ['__pyjamas__', 'global']])) elif v.node.name == 'globals': # XXX: Should be dictproxy, to handle changes return "@{{_globals}}(%s)" % self.modpfx()[:-1] elif v.node.name == 'len' and depth == -1 and len(v.args) == 1: expr = self.expr(v.args[0], current_klass) return self.inline_len_code(expr) else: if name_type is None: # What to do with a (yet) unknown name? # Just nothing... 
if optlocal_var: call_name = '(typeof %s == "undefined"?%s:%s)' % ( v.node.name, self.scopeName(v.node.name, depth, is_local), v.node.name, ) else: call_name = self.scopeName(v.node.name, depth, is_local) else: call_name = jsname call_args = [] elif isinstance(v.node, self.ast.Getattr): attrname = self.attrib_remap(v.node.attrname) if isinstance(v.node.expr, self.ast.Name): call_name, method_name = self._name2(v.node.expr, current_klass, attrname) call_args = [] elif isinstance(v.node.expr, self.ast.Getattr): call_name = self._getattr2(v.node.expr, current_klass, v.node.attrname) method_name = call_name.pop() call_name = self.attrib_join(call_name) call_args = [] elif isinstance(v.node.expr, self.ast.CallFunc): call_name = self._callfunc(v.node.expr, current_klass) method_name = attrname call_args = [] elif isinstance(v.node.expr, self.ast.Subscript): call_name = self._subscript(v.node.expr, current_klass) method_name = attrname call_args = [] elif isinstance(v.node.expr, self.ast.Const): call_name = self.expr(v.node.expr, current_klass) method_name = attrname call_args = [] elif isinstance(v.node.expr, self.ast.Slice): call_name = self._slice(v.node.expr, current_klass) method_name = attrname call_args = [] else: raise TranslationError( "unsupported type (in _callfunc)", v.node.expr, self.module_name) elif isinstance(v.node, self.ast.CallFunc): call_name = self._callfunc(v.node, current_klass) call_args = [] elif isinstance(v.node, self.ast.Subscript): call_name = self._subscript(v.node, current_klass) call_args = [] else: raise TranslationError( "unsupported type (in _callfunc)", v.node, self.module_name) if method_name in pyjs_attrib_remap: method_name = pyjs_attrib_remap[method_name] call_name = strip_py(call_name) kwargs = [] star_arg_name = None if v.star_args: star_arg_name = self.expr(v.star_args, current_klass) dstar_arg_name = None if v.dstar_args: dstar_arg_name = self.expr(v.dstar_args, current_klass) for ch4 in v.args: if isinstance(ch4, self.ast.Keyword): kwarg = self.vars_remap(ch4.name) + ":" + \ self.expr(ch4.expr, current_klass) kwargs.append(kwarg) else: arg = self.expr(ch4, current_klass) call_args.append(arg) if kwargs: fn_args = ", ".join(['{' + ', '.join(kwargs) + '}']+call_args) else: fn_args = ", ".join(['{}']+call_args) if kwargs or star_arg_name or dstar_arg_name: if not star_arg_name: star_arg_name = 'null' if not dstar_arg_name: dstar_arg_name = 'null' if method_name is None: call_code = ("$pyjs_kwargs_call(null, "+call_name+", " + star_arg_name + ", " + dstar_arg_name + ", ["+fn_args+"]" + ")") else: call_code = ("$pyjs_kwargs_call("+call_name+", '"+method_name+"', " + star_arg_name + ", " + dstar_arg_name + ", ["+fn_args+"]" + ")") else: if not method_name is None: call_name = "%s['%s']" % (call_name, method_name) call_code = call_name + "(" + ", ".join(call_args) + ")" return call_code def _callfunc(self, v, current_klass, is_statement=False, optlocal_var=False): call_code = self._callfunc_code( v, current_klass, is_statement=is_statement, optlocal_var=optlocal_var, ) if not self.ignore_debug: call_code = self.track_call(call_code, v.lineno) return call_code def _print(self, node, current_klass): if not self.print_statements: return call_args = [] for ch4 in node.nodes: arg = self.expr(ch4, current_klass) call_args.append(arg) self.w( self.spacing() + self.track_call("@{{printFunc}}([%s], %d)" % (', '.join(call_args), int(isinstance(node, self.ast.Printnl))), node.lineno) + ';') def _tryFinally(self, node, current_klass): body = node.body if not 
isinstance(node.body, self.ast.TryExcept): body = node try: # python2.N node.body.final = node.final except: # lib2to3 node.body.final_ = node.final_ self._tryExcept(body, current_klass) def _tryExcept(self, node, current_klass): save_is_generator = self.is_generator if self.is_generator: self.is_generator = self.compiler.walk(node, GeneratorExitVisitor(), walker=GeneratorExitVisitor()).has_yield self.try_depth += 1 self.stacksize_depth += 1 save_state_max_depth = self.state_max_depth start_states = len(self.generator_states) pyjs_try_err = '$pyjs_try_err' if self.source_tracking: self.w( self.spacing() + "var $pyjs__trackstack_size_%d = $pyjs.trackstack.length;" % self.stacksize_depth) self.generator_switch_case(increment=True) self.w( self.indent() + "try {") added_try_except_counter = not self.ignore_debug and self.debug if added_try_except_counter: self.w( self.spacing() + "try {") self.indent() self.w( self.spacing() + "$pyjs.in_try_except += 1;") if self.is_generator: self.w( self.spacing() + "if (typeof $generator_exc[%d] != 'undefined' && $generator_exc[%d] !== null) throw $generator_exc[%d];" % (\ self.try_depth, self.try_depth, self.try_depth)) self.generator_add_state() self.generator_switch_open() self.generator_switch_case(increment=False) if self.is_generator: self.w( self.spacing() + "$generator_exc[%d] = null;" % (self.try_depth, )) self.generator_switch_case(increment=True) for stmt in node.body.nodes: self._stmt(stmt, current_klass) self.generator_switch_case(increment=True) if hasattr(node, 'else_') and node.else_: self.w( self.spacing() + "throw @{{TryElse}};") self.generator_switch_case(increment=True) self.generator_switch_case(increment=True) self.generator_switch_close() if added_try_except_counter: self.w( self.dedent() + "} finally { $pyjs.in_try_except -= 1; }") self.w( self.dedent() + "} catch(%s) {" % pyjs_try_err) self.indent() if self.source_tracking: self.w( self.spacing() + "$pyjs.__last_exception_stack__ = sys.save_exception_stack($pyjs__trackstack_size_%d - 1);" % self.stacksize_depth) self.w( self.spacing() + "$pyjs.__active_exception_stack__ = null;") if self.is_generator: self.w( self.spacing() + "$generator_exc[%d] = %s;" % (self.try_depth, pyjs_try_err)) try_state_max_depth = self.state_max_depth self.generator_states += [0 for i in range(save_state_max_depth+1, try_state_max_depth)] if hasattr(node, 'else_') and node.else_: self.w( self.indent() + """\ if (%(e)s.__name__ == 'TryElse') {""" % {'e': pyjs_try_err}) self.generator_add_state() self.generator_switch_open() self.generator_switch_case(increment=False) for stmt in node.else_: self._stmt(stmt, current_klass) self.generator_switch_case(increment=True) self.generator_switch_close() self.generator_del_state() self.w( self.dedent() + """} else {""") self.indent() if self.attribute_checking: self.w( self.spacing() + """%s = @{{_errorMapping}}(%s);""" % (pyjs_try_err, pyjs_try_err)) self.w( self.spacing() + """\ var %(e)s_name = (typeof %(e)s.__name__ == 'undefined' ? 
%(e)s.name : %(e)s.__name__ );\ """ % {'e': pyjs_try_err}) self.w( self.spacing() + "$pyjs.__last_exception__ = {error: %s, module: %s};" % (pyjs_try_err, self.module_prefix[:-1])) if self.source_tracking: self.w( """\ %(s)sif ($pyjs.trackstack.length > $pyjs__trackstack_size_%(d)d) { %(s)s\t$pyjs.trackstack = $pyjs.trackstack.slice(0,$pyjs__trackstack_size_%(d)d); %(s)s\t$pyjs.track = $pyjs.trackstack.slice(-1)[0]; %(s)s} %(s)s$pyjs.track.module='%(m)s';""" % {'s': self.spacing(), 'd': self.stacksize_depth, 'm': self.module_name}) pyjs_try_err = self.add_lookup('variable', pyjs_try_err, pyjs_try_err) if hasattr(node, 'handlers'): else_str = self.spacing() if len(node.handlers) == 1 and node.handlers[0][0] is None: else_str += "if (true) " for handler in node.handlers: lineno = handler[2].nodes[0].lineno expr = handler[0] as_ = handler[1] if as_: errName = as_.name else: errName = None if not expr: self.w( "%s{" % else_str) else: if expr.lineno: lineno = expr.lineno l = [] if isinstance(expr, self.ast.Tuple): for x in expr.nodes: l.append("((%s_name == %s.__name__)||@{{_isinstance}}(%s,%s))" % (pyjs_try_err, self.expr(x, current_klass),pyjs_try_err, self.expr(x, current_klass))) else: l = [ "(%s_name == %s.__name__)||@{{_isinstance}}(%s,%s)" % (pyjs_try_err, self.expr(expr, current_klass),pyjs_try_err, self.expr(expr, current_klass)) ] self.w( "%sif (%s) {" % (else_str, "||".join(l))) self.indent() if errName: tnode = self.ast.Assign([self.ast.AssName(errName, "OP_ASSIGN", lineno)], self.ast.Name(pyjs_try_err, lineno), lineno) self._assign(tnode, current_klass) self.generator_add_state() self.generator_switch_open() self.generator_switch_case(increment=False) for stmt in handler[2]: self._stmt(stmt, current_klass) self.generator_switch_case(increment=True) self.generator_switch_close() self.generator_del_state() self.w( self.dedent() + "}", False) else_str = "else " if node.handlers[-1][0]: # No default catcher, create one to fall through self.w( "%s{ $pyjs.__active_exception_stack__ = $pyjs.__last_exception_stack__; $pyjs.__last_exception_stack__ = null; throw %s; }" % (else_str, pyjs_try_err)) else: self.w(None) if hasattr(node, 'else_') and node.else_: self.w( self.dedent() + "}") final = None if hasattr(node, 'final'): final = node.final if hasattr(node, 'final_'): final = node.final_ if final is not None: self.w( self.dedent() + "} finally {") self.indent() if self.is_generator: self.w( self.spacing() + "if ($yielding === true) return $yield_value;") #self.w( self.spacing() + "if ($yielding === null) throw $exc;") else_except_state_max_depth = self.state_max_depth self.generator_states = self.generator_states[:save_state_max_depth] self.generator_states += [0 for i in range(save_state_max_depth, else_except_state_max_depth)] self.generator_add_state() self.generator_switch_open() self.generator_switch_case(increment=False) for stmt in final: self._stmt(stmt, current_klass) self.generator_switch_case(increment=True) self.generator_switch_close() self.generator_states = self.generator_states[:start_states+1] self.w( self.dedent() + "}") if self.is_generator: self.w( self.spacing() + "$generator_exc[%d] = null;" % (self.try_depth, )) self.generator_clear_state() self.generator_del_state() self.try_depth -= 1 self.stacksize_depth -= 1 self.generator_switch_case(increment=True) self.is_generator = save_is_generator def _getattr(self, v, current_klass, use_getattr=None): if use_getattr is None: use_getattr = self.getattr_support attr_name = self.attrib_remap(v.attrname) if use_getattr: expr 
= self.expr(v.expr, current_klass) return ["@{{getattr}}(%s, '%s')" % (expr, attr_name)] if isinstance(v.expr, self.ast.Name): obj = self._name(v.expr, current_klass, return_none_for_module=True) if not use_getattr or attr_name == '__class__' or \ attr_name == '__name__': return [obj, attr_name] return ["@{{getattr}}(%s, '%s')" % (obj, attr_name)] elif isinstance(v.expr, self.ast.Getattr): return self._getattr(v.expr, current_klass) + [attr_name] elif isinstance(v.expr, self.ast.Subscript): return [self._subscript(v.expr, self.modpfx()), attr_name] elif isinstance(v.expr, self.ast.CallFunc): return [self._callfunc(v.expr, self.modpfx()), attr_name] elif isinstance(v.expr, self.ast.Const): return [self._const(v.expr), attr_name] elif isinstance(v.expr, self.ast.List): return [self._list(v.expr, current_klass), attr_name] elif isinstance(v.expr, self.ast.Dict): return [self._dict(v.expr, current_klass), attr_name] elif isinstance(v.expr, self.ast.Tuple): return [self._tuple(v.expr, current_klass), attr_name] elif isinstance(v.expr, self.ast.Lambda): return [self._lambda(v.expr, current_klass), attr_name] elif isinstance(v.expr, self.ast.Slice): return [self._slice(v.expr, current_klass), attr_name] else: raise TranslationError( "unsupported type (in _getattr)", v.expr, self.module_name) def modpfx(self): return strip_py(self.module_prefix) def _name(self, v, current_klass, return_none_for_module=False, optlocal_var=False, ): if not hasattr(v, 'name'): name = v.attrname else: name = v.name name_type, pyname, jsname, depth, is_local = self.lookup(name) if name_type is None: # What to do with a (yet) unknown name? # Just nothing... if not optlocal_var: return self.scopeName(name, depth, is_local) return '(typeof %s == "undefined"?%s:%s)' % ( name, self.scopeName(name, depth, is_local), name, ) return jsname def _name2(self, v, current_klass, attr_name): name_type, pyname, jsname, depth, is_local = self.lookup(v.name) if name_type is None: jsname = self.scopeName(v.name, depth, is_local) return jsname, attr_name def _getattr2(self, v, current_klass, attr_name): if isinstance(v.expr, self.ast.Getattr): return self._getattr2(v.expr, current_klass, v.attrname) + [attr_name] if isinstance(v.expr, self.ast.Name): name_type, pyname, jsname, depth, is_local = self.lookup(v.expr.name) if name_type is None: jsname = self.scopeName(v.expr.name, depth, is_local) return [jsname, v.attrname, attr_name] return [self.expr(v.expr, current_klass), v.attrname, attr_name] def _class(self, node, parent_class = None): save_top_level = self.top_level if parent_class is None: class_name = self.modpfx() + node.name else: class_name = node.name self.top_level = False local_prefix = '$cls_definition' name_scope = {} current_klass = Klass(class_name, name_scope) if self.function_argument_checking or self.module_name == 'pyjslib': current_klass.__md5__ = self.md5(node) if len(node.bases) == 0: base_classes = [("object", "pyjslib.object")] else: base_classes = [] for node_base in node.bases: if isinstance(node_base, self.ast.Name): node_base_name = node_base.name base_class = self._name(node_base, None) elif isinstance(node_base, self.ast.Getattr): # the bases are not in scope of the class so do not # pass our class to self._name node_base_name = node_base.attrname base_class = self.expr(node_base, None) else: raise TranslationError( "unsupported type (in _class)", node_base, self.module_name) base_classes.append((node_base_name, base_class)) current_klass.set_base(base_classes[0][1]) if node.name in ['object', 
'pyjslib.Object', 'pyjslib.object']: base_classes = [] class_name = self.add_lookup('class', node.name, class_name) self.w( self.indent() + class_name + """ = (function(){ %(s)svar %(p)s = new Object(); %(s)svar $method; %(s)s%(p)s.__module__ = '%(module)s';""" % {'s': self.spacing(), 'p': local_prefix, 'module': self.module_name}) if self.function_argument_checking or self.module_name == 'pyjslib': self.w( self.spacing() + "%(p)s.__md5__ = '%(m)s';" % {'p': local_prefix, 'm': current_klass.__md5__}) self.push_lookup(name_scope) for child in node.code: self.is_class_definition = True self.local_prefix = local_prefix self._stmt(child, current_klass) create_class = """\ %(s)svar $bases = new Array(%(bases)s);""" if self.module_name == 'pyjslib': create_class += """ %(s)sreturn $pyjs_type('%(n)s', $bases, %(local_prefix)s);""" else: create_class += """ %(s)svar $data = $p['dict'](); %(s)sfor (var $item in %(local_prefix)s) { $data.__setitem__($item, %(local_prefix)s[$item]); } %(s)sreturn @{{_create_class}}('%(n)s', $p['tuple']($bases), $data);""" create_class %= {'n': node.name, 's': self.spacing(), 'local_prefix': local_prefix, 'bases': ",".join(map(lambda x: x[1], base_classes))} create_class += """ %s})();""" % self.dedent() self.w( create_class) self.pop_lookup() self.is_class_definition = None self.local_prefix = None self.top_level = save_top_level def classattr(self, node, current_klass): self._assign(node, current_klass) def _raise(self, node, current_klass): if self.is_generator: self.w( self.spacing() + "$generator_state[%d]=%d;" % (len(self.generator_states)-1, self.generator_states[-1]+1)) if node.expr1: if self.source_tracking: self.w( self.spacing() + "$pyjs.__active_exception_stack__ = null;") if node.expr2: if node.expr3: self.w( """ %(s)svar $pyjs__raise_expr1 = %(expr1)s; %(s)svar $pyjs__raise_expr2 = %(expr2)s; %(s)svar $pyjs__raise_expr3 = %(expr3)s; %(s)sif ($pyjs__raise_expr2 !== null && $pyjs__raise_expr1.__is_instance__ === true) { %(s)s\tthrow @{{TypeError}}('instance exception may not have a separate value'); %(s)s} %(s)s\tthrow ($pyjs__raise_expr1.apply($pyjs__raise_expr1, $pyjs__raise_expr2, $pyjs__raise_expr3)); """ % { 's': self.spacing(), 'expr1': self.expr(node.expr1, current_klass), 'expr2': self.expr(node.expr2, current_klass), 'expr3': self.expr(node.expr3, current_klass), }) else: self.w( """ %(s)svar $pyjs__raise_expr1 = %(expr1)s; %(s)svar $pyjs__raise_expr2 = %(expr2)s; %(s)sif ($pyjs__raise_expr2 !== null && $pyjs__raise_expr1.__is_instance__ === true) { %(s)s\tthrow @{{TypeError}}('instance exception may not have a separate value'); %(s)s} %(s)sif (@{{isinstance}}($pyjs__raise_expr2, $p['tuple'])) { %(s)s\tthrow ($pyjs__raise_expr1.apply($pyjs__raise_expr1, $pyjs__raise_expr2.getArray())); %(s)s} else { %(s)s\tthrow ($pyjs__raise_expr1($pyjs__raise_expr2)); %(s)s} """ % { 's': self.spacing(), 'expr1': self.expr(node.expr1, current_klass), 'expr2': self.expr(node.expr2, current_klass), }) else: self.w( self.spacing() + "throw (%s);" % self.expr( node.expr1, current_klass)) else: if self.source_tracking: self.w( self.spacing() + "$pyjs.__active_exception_stack__ = $pyjs.__last_exception_stack__;") self.w( self.spacing() + "$pyjs.__last_exception_stack__ = null;") s = self.spacing() self.w( """\ %(s)sthrow ($pyjs.__last_exception__? 
%(s)s\t$pyjs.__last_exception__.error: %(s)s\t@{{TypeError}}('exceptions must be classes, instances, or strings (deprecated), not NoneType'));\ """ % locals()) self.generator_switch_case(increment=True) def _method(self, node, current_klass): save_top_level = self.top_level self.push_options() save_has_js_return = self.has_js_return self.has_js_return = False save_has_yield = self.has_yield self.has_yield = False save_is_generator = self.is_generator self.is_generator = False save_generator_states = self.generator_states self.generator_states = [0] self.state_max_depth = len(self.generator_states) save_local_prefix = self.local_prefix method_name = self.attrib_remap(node.name) jsmethod_name = self.add_lookup('method', method_name, method_name) self.local_prefix = None self.is_class_definition = None staticmethod, classmethod, decorator_code = self.parse_decorators(node, method_name, current_klass) if node.name == '__new__': staticmethod = True self.pop_lookup() self.push_lookup() arg_names = [] for arg in node.argnames: if isinstance(arg, tuple): for a in arg: arg_names.append(self.add_lookup('variable', a, a)) else: arg_names.append(self.add_lookup('variable', arg, arg)) normal_arg_names = arg_names[0:] if node.kwargs: kwargname = normal_arg_names.pop() else: kwargname = None if node.varargs: varargname = normal_arg_names.pop() else: varargname = None declared_arg_names = list(normal_arg_names) #if node.kwargs: declared_arg_names.append(kwargname) if staticmethod: function_args = "(" + ", ".join(declared_arg_names) + ")" else: function_args = "(" + ", ".join(declared_arg_names[1:]) + ")" self.w( self.indent() + "$method = $pyjs__bind_method2('"+method_name+"', function" + function_args + " {") defaults_done_by_inline = False if staticmethod: self._static_method_init(node, declared_arg_names, varargname, kwargname, current_klass) elif classmethod: self._class_method_init(node, declared_arg_names, varargname, kwargname, current_klass) else: if self.create_locals: defaults_done_by_inline = True self._instance_method_init(node, declared_arg_names, varargname, kwargname, current_klass) # default arguments if not defaults_done_by_inline: self._default_args_handler(node, declared_arg_names, current_klass, kwargname, "") local_arg_names = normal_arg_names + declared_arg_names if node.kwargs: local_arg_names.append(kwargname) if node.varargs: local_arg_names.append(varargname) self.top_level = False save_output = self.output self.output = StringIO() if self.source_tracking: self.w( self.spacing() + "$pyjs.track={module:'%s', lineno:%d};$pyjs.trackstack.push($pyjs.track);" % (self.module_name, node.lineno)) self.track_lineno(node, True) for child in node.code: self._stmt(child, current_klass) if not self.has_yield and self.source_tracking and self.has_js_return: self.source_tracking = False self.output = StringIO() for child in node.code: self._stmt(child, None) elif self.has_yield: if self.has_js_return: self.source_tracking = False self.is_generator = True self.generator_states = [0] self.output = StringIO() self.indent() if self.source_tracking: self.w( self.spacing() + "$pyjs.track={module:'%s',lineno:%d};$pyjs.trackstack.push($pyjs.track);" % (self.module_name, node.lineno)) self.track_lineno(node, True) self.generator_switch_open() self.generator_switch_case(increment=False) for child in node.code: self._stmt(child, None) self.generator_switch_case(increment=True) self.generator_switch_close() self.dedent() captured_output = self.output.getvalue() self.output = save_output self.w( 
self.local_js_vars_decl(local_arg_names)) if self.is_generator: self.generator(captured_output) else: self.w( captured_output, False) # we need to return null always, so it is not undefined if node.code.nodes: lastStmt = node.code.nodes[-1] else: lastStmt = None if not isinstance(lastStmt, self.ast.Return): if self.source_tracking: self.w( self.spacing() + "$pyjs.trackstack.pop();$pyjs.track=$pyjs.trackstack.pop();$pyjs.trackstack.push($pyjs.track);") if not self._isNativeFunc(lastStmt): self.w( self.spacing() + "return null;") self.w( self.dedent() + "}") bind_type = 'bound' if staticmethod: bind_type = 'static' elif classmethod: bind_type = 'class' self.pop_lookup() self.func_args(node, current_klass, None, bind_type, declared_arg_names, varargname, kwargname) self.generator_states = save_generator_states self.state_max_depth = len(self.generator_states) self.is_generator = save_is_generator self.has_yield = save_has_yield self.has_js_return = save_has_js_return self.pop_options() self.push_lookup(current_klass.name_scope) staticmethod, classmethod, decorator_code = self.parse_decorators(node, node.name, current_klass, True, bind_type) decorator_code = decorator_code % '$method' self.w( self.spacing() + "%s = %s;" % (jsmethod_name, decorator_code)) self.add_lookup('method', node.name, "@{{staticmethod}}(%s)" % jsmethod_name) self.local_prefix = save_local_prefix self.is_class_definition = True self.top_level = save_top_level def _isNativeFunc(self, node): if isinstance(node, self.ast.Discard): if isinstance(node.expr, self.ast.CallFunc): if isinstance(node.expr.node, self.ast.Name): name_type, pyname, jsname, depth, is_local = self.lookup(node.expr.node.name) if name_type == '__pyjamas__' and jsname in __pyjamas__.native_js_funcs: return True return False def _exec(self, node, current_klass): pass def _stmt(self, node, current_klass): self.track_lineno(node) if isinstance(node, self.ast.Return): self._return(node, current_klass) elif isinstance(node, self.ast.Yield): self._yield(node, current_klass) elif isinstance(node, self.ast.Break): self._break(node, current_klass) elif isinstance(node, self.ast.Continue): self._continue(node, current_klass) elif isinstance(node, self.ast.Assign): self._assign(node, current_klass) elif isinstance(node, self.ast.AugAssign): self._augassign(node, current_klass) elif isinstance(node, self.ast.Discard): self._discard(node, current_klass) elif isinstance(node, self.ast.If): self._if(node, current_klass) elif isinstance(node, self.ast.For): self._for(node, current_klass) elif isinstance(node, self.ast.While): self._while(node, current_klass) elif isinstance(node, self.ast.Subscript): self._subscript_stmt(node, current_klass) elif isinstance(node, self.ast.Global): self._global(node, current_klass) elif isinstance(node, self.ast.Pass): pass elif isinstance(node, self.ast.Function): self._function(node, current_klass) elif isinstance(node, self.ast.Printnl): self._print(node, current_klass) elif isinstance(node, self.ast.Print): self._print(node, current_klass) elif isinstance(node, self.ast.TryExcept): self._tryExcept(node, current_klass) elif isinstance(node, self.ast.TryFinally): self._tryFinally(node, current_klass) elif isinstance(node, self.ast.Raise): self._raise(node, current_klass) elif isinstance(node, self.ast.Import): self._import(node, current_klass) elif isinstance(node, self.ast.From): self._from(node, current_klass) elif isinstance(node, self.ast.AssAttr): self._assattr(node, current_klass) elif isinstance(node, self.ast.Exec): 
            self._exec(node, current_klass)
        elif isinstance(node, self.ast.Assert):
            self._assert(node, current_klass)
        elif isinstance(node, self.ast.Class):
            self._class(node, current_klass)
        #elif isinstance(node, self.ast.CallFunc):
        #    self._callfunc(node, current_klass)
        elif isinstance(node, self.ast.Slice):
            self.w( self.spacing() + self._slice(node, current_klass))
        elif isinstance(node, self.ast.AssName):
            # TODO: support other OP_xxx types and move this to
            # a separate function
            if node.flags == "OP_DELETE":
                name = self._lhsFromName(node.name, current_klass)
                self.w( self.spacing() + "@{{_del}}(%s);" % name)
            else:
                raise TranslationError(
                    "unsupported AssName type (in _stmt)", node, self.module_name)
        elif isinstance(node, self.ast.AssTuple):
            for node in node.nodes:
                self._stmt(node, current_klass)
        else:
            raise TranslationError(
                "unsupported type (in _stmt)", node, self.module_name)

    def get_start_line(self, node, lineno):
        if node:
            if hasattr(node, "lineno") and node.lineno != None and node.lineno < lineno:
                lineno = node.lineno
            if hasattr(node, 'getChildren'):
                for n in node.getChildren():
                    lineno = self.get_start_line(n, lineno)
        return lineno

    def get_line_trace(self, node):
        lineNum1 = "Unknown"
        srcLine = ""
        if hasattr(node, "lineno"):
            if node.lineno != None:
                lineNum2 = node.lineno
                lineNum1 = self.get_start_line(node, lineNum2)
                srcLine = self.src[min(lineNum1, len(self.src))-1].strip()
                if lineNum1 < lineNum2:
                    srcLine += ' ... ' + self.src[min(lineNum2, len(self.src))-1].strip()
                srcLine = srcLine.replace('\\', '\\\\')
                srcLine = srcLine.replace('"', '\\"')
                srcLine = srcLine.replace("'", "\\'")
        return self.module_name + ".py, line " \
               + str(lineNum1) + ":"\
               + "\\n" \
               + " " + srcLine

    def _augassign(self, node, current_klass):
        def astOP(op):
            if op == "+=": return self.ast.Add
            if op == "-=": return self.ast.Sub
            if op == "*=": return self.ast.Mul
            if op == "/=": return self.ast.Div
            if op == "%=": return self.ast.Mod
            if op == "//=": return self.ast.FloorDiv
            if op == "**=": return self.ast.Power
            if self.number_classes:
                if op == "&=": return self.ast.Bitand
                if op == "^=": return self.ast.Bitxor
                if op == "|=": return self.ast.Bitor
                if op == ">>=": return self.ast.RightShift
                if op == "<<=": return self.ast.LeftShift
            raise TranslationError(
                "unsupported OP (in _augassign)", node, self.module_name)

        v = node.node
        if isinstance(v, self.ast.Getattr):
            # XXX HACK! don't allow += on return result of getattr.
            # TODO: create a temporary variable or something.
            lhs = self.attrib_join(self._getattr(v, current_klass, False))
            lhs_ass = self.ast.AssAttr(v.expr, v.attrname, "OP_ASSIGN", node.lineno)
        elif isinstance(v, self.ast.Name):
            lhs = self._name(v, current_klass)
            lhs_ass = self.ast.AssName(v.name, "OP_ASSIGN", node.lineno)
        elif isinstance(v, self.ast.Subscript) or self.operator_funcs:
            if len(v.subs) != 1:
                raise TranslationError(
                    "must have one sub (in _assign)", v, self.module_name)
            lhs = self.ast.Subscript(v.expr, "OP_ASSIGN", v.subs)
            expr = v.expr
            subs = v.subs
            if not (isinstance(v.subs[0], self.ast.Const) or \
                    isinstance(v.subs[0], self.ast.Name)) or \
               not isinstance(v.expr, self.ast.Name):
                # There's something complex here.
                # Neither a simple x[0] += ?
                # Nor a simple x[y] += ?
                augexpr = self.uniqid('$augexpr')
                augsub = self.uniqid('$augsub')
                self.w( self.spacing() + "var " + augsub + " = " + self.expr(subs[0], current_klass) + ";")
                self.add_lookup('variable', augexpr, augexpr)
                self.w( self.spacing() + "var " + augexpr + " = " + self.expr(expr, current_klass) + ";")
                self.add_lookup('variable', augsub, augsub)
                lhs = self.ast.Subscript(self.ast.Name(augexpr), "OP_ASSIGN", [self.ast.Name(augsub)])
                v = self.ast.Subscript(self.ast.Name(augexpr), v.flags, [self.ast.Name(augsub)])
            op = astOP(node.op)
            try: # python2.N
                tnode = self.ast.Assign([lhs], op((v, node.expr)))
            except: # lib2to3
                tnode = self.ast.Assign([lhs], op(v, node.expr))
            return self._assign(tnode, current_klass)
        else:
            raise TranslationError(
                "unsupported type (in _augassign)", v, self.module_name)
        try:
            op_ass = astOP(node.op)
        except:
            op_ass = None
        if not self.operator_funcs or op_ass is None:
            op = node.op
            rhs = self.expr(node.expr, current_klass)
            self.w( self.spacing() + lhs + " " + op + " " + rhs + ";")
            return
        if isinstance(v, self.ast.Name):
            self.add_lookup('global', v.name, lhs)
        op = astOP(node.op)
        try: # python2.N
            tnode = self.ast.Assign([lhs_ass], op((v, node.expr)))
        except: # lib2to3
            tnode = self.ast.Assign([lhs_ass], op(v, node.expr))
        return self._assign(tnode, current_klass)

    def _lhsFromName(self, name, current_klass, set_name_type = 'variable'):
        name_type, pyname, jsname, depth, is_local = self.lookup(name)
        if is_local:
            lhs = jsname
            self.add_lookup(set_name_type, name, jsname)
        elif self.top_level:
            if current_klass:
                lhs = current_klass.name + "." + name
            else:
                vname = self.modpfx() + name
                vname = self.add_lookup(set_name_type, name, vname)
                #lhs = "var " + name + " = " + vname
                lhs = vname
        else:
            vname = self.add_lookup(set_name_type, name, name)
            if self.create_locals:
                # hmmm...
name_type, pyname, jsname, depth, is_local = self.lookup(name) if is_local: lhs = jsname self.add_lookup(set_name_type, name, jsname) else: lhs = vname else: lhs = vname return lhs def _lhsFromAttr(self, v, current_klass): if isinstance(v.expr, self.ast.Name): lhs = self._name(v.expr, current_klass) elif isinstance(v.expr, self.ast.Getattr): lhs = self.attrib_join(self._getattr(v, current_klass, False)[:-1]) elif isinstance(v.expr, self.ast.Subscript): lhs = self._subscript(v.expr, current_klass) elif isinstance(v.expr, self.ast.CallFunc): lhs = self._callfunc(v.expr, current_klass) else: raise TranslationError( "unsupported type (in _assign)", v.expr, self.module_name) return lhs def _assign(self, node, current_klass): if len(node.nodes) != 1: tempvar = self.uniqid("$assign") tnode = self.ast.Assign([self.ast.AssName(tempvar, "OP_ASSIGN", node.lineno)], node.expr, node.lineno) self._assign(tnode, current_klass) for v in node.nodes: tnode2 = self.ast.Assign([v], self.ast.Name(tempvar, node.lineno), node.lineno) self._assign(tnode2, current_klass) return dbg = 0 v = node.nodes[0] if isinstance(v, self.ast.AssAttr): attr_name = self.attrib_remap(v.attrname) rhs = self.expr(node.expr, current_klass) lhs = self._lhsFromAttr(v, current_klass) if v.flags == "OP_ASSIGN": op = "=" else: raise TranslationError( "unsupported flag (in _assign)", v, self.module_name) if self.getattr_support and not self.descriptors: # getattr support implies the use of setattr code = "@{{setattr}}(%(l)s, '%(a)s', %(r)s);" self.w( self.spacing() + code % {'l': lhs, 'a': attr_name, 'r': rhs}) return if self.descriptors: desc_setattr = [ "%(l)s.__is_instance__ &&", "typeof %(l)s.__setattr__ == 'function' ?", "%(l)s.__setattr__('%(a)s', %(r)s) :", "@{{setattr}}(%(l)s, '%(a)s', %(r)s);", ] self.w( self.spacing() + ' '.join(desc_setattr) % {'l': lhs, 'a': attr_name, 'r': rhs}) return lhs += '.' 
+ attr_name elif isinstance(v, self.ast.AssName): rhs = self.expr(node.expr, current_klass) lhs = self._lhsFromName(v.name, current_klass) if v.flags == "OP_ASSIGN": op = "=" else: raise TranslationError( "unsupported flag (in _assign)", v, self.module_name) elif isinstance(v, self.ast.Subscript): if v.flags == "OP_ASSIGN": obj = self.expr(v.expr, current_klass) if len(v.subs) != 1: raise TranslationError( "must have one sub (in _assign)", v, self.module_name) idx = self.expr(v.subs[0], current_klass) value = self.expr(node.expr, current_klass) self.w( self.spacing() + self.track_call(obj + ".__setitem__(" + idx + ", " + value + ")", v.lineno) + ';') return else: raise TranslationError( "unsupported flag (in _assign)", v, self.module_name) elif isinstance(v, self.ast.Slice): if v.flags == "OP_ASSIGN": if not v.lower: lower = 0 else: lower = self.expr(v.lower, current_klass) if not v.upper: upper = 'null' else: upper = self.expr(v.upper, current_klass) obj = self.expr(v.expr, current_klass) value = self.expr(node.expr, current_klass) self.w( self.spacing() + self.track_call("@{{__setslice}}(%s, %s, %s, %s)" % (obj, lower, upper, value), v.lineno) + ';') return else: raise TranslationError( "unsupported flag (in _assign)", v, self.module_name) elif isinstance(v, (self.ast.AssList, self.ast.AssTuple)): tempName = self.uniqid("$tupleassign") self.w( self.spacing() + "var " + tempName + " = " + \ self.expr(node.expr, current_klass) + ";") for index,child in enumerate(v.getChildNodes()): rhs = self.track_call(tempName + ".__getitem__(" + str(index) + ")", v.lineno) if isinstance(child, self.ast.AssAttr): lhs = self._lhsFromAttr(child, current_klass) + '.' + self.attrib_remap(child.attrname) elif isinstance(child, self.ast.AssName): lhs = self._lhsFromName(child.name, current_klass) elif isinstance(child, self.ast.Subscript): if child.flags == "OP_ASSIGN": obj = self.expr(child.expr, current_klass) if len(child.subs) != 1: raise TranslationError("must have one sub " + "(in _assign)", child, self.module_name) idx = self.expr(child.subs[0], current_klass) value = self.expr(node.expr, current_klass) self.w( self.spacing() + self.track_call(obj + ".__setitem__(" \ + idx + ", " + rhs + ")", v.lineno) + ';') continue elif isinstance(child, self.ast.Slice): if child.flags == "OP_ASSIGN": if not child.lower: lower = 0 else: lower = self.expr(child.lower, current_klass) if not child.upper: upper = 'null' else: upper = self.expr(child.upper, current_klass) obj = self.expr(child.expr, current_klass) self.w( self.spacing() + self.track_call("@{{__setslice}}" "(%s, %s, %s, %s)" % (obj, lower, upper, rhs) , v.lineno) + ';') continue else: raise TranslationError( "unsupported flag (in _assign)", v, self.module_name) else: raise TranslationError( "unsupported type in assignment list", v, self.module_name) self.w( self.spacing() + lhs + " = " + rhs + ";") return else: raise TranslationError( "unsupported type (in _assign)", v, self.module_name) if dbg: print "b", repr(node.expr), rhs self.w( self.spacing() + lhs + " " + op + " " + rhs + ";") def _discard(self, node, current_klass): if isinstance(node.expr, self.ast.CallFunc): expr = self._callfunc( node.expr, current_klass, is_statement=True, optlocal_var=isinstance(node.expr.node, self.ast.Name), ) if isinstance(node.expr.node, self.ast.Name): name_type, pyname, jsname, depth, is_local = self.lookup(node.expr.node.name) if name_type == '__pyjamas__' and \ jsname in __pyjamas__.native_js_funcs: self.w( expr) return self.w( self.spacing() + expr + ";") elif 
isinstance(node.expr, self.ast.Const): # we can safely remove all constants that are discarded, # e.g None fo empty expressions after a unneeded ";" or # mostly important to remove doc strings if node.expr.value in ["@CONSTANT_DECLARATION@", "@ATTRIB_REMAP_DECLARATION@"]: self.w( node.expr.value) return elif isinstance(node.expr, self.ast.Yield): self._yield(node.expr, current_klass) else: raise TranslationError( "unsupported type, must be call or const (in _discard)", node.expr, self.module_name) def _if(self, node, current_klass): save_is_generator = self.is_generator if self.is_generator: self.is_generator = self.compiler.walk(node, GeneratorExitVisitor(), walker=GeneratorExitVisitor()).has_yield if self.is_generator: self.w( self.spacing() + "$generator_state[%d] = 0;" % (len(self.generator_states)+1,)) self.generator_switch_case(increment=True) self.generator_add_state() for i in range(len(node.tests)): test, consequence = node.tests[i] if i == 0: keyword = "if" else: keyword = "else if" self.lookup_stack[-1] self._if_test(keyword, test, consequence, node, current_klass) if node.else_: keyword = "else" test = None consequence = node.else_ self._if_test(keyword, test, consequence, node, current_klass) if self.is_generator: self.w( self.spacing() + "$generator_state[%d]=0;" % (len(self.generator_states)-1,)) self.generator_del_state() self.is_generator = save_is_generator def _if_test(self, keyword, test, consequence, node, current_klass): if test: expr = self.expr(test, current_klass) if not self.is_generator: self.w( self.indent() +keyword + " (" + self.track_call(self.inline_bool_code(expr), test.lineno)+") {") else: self.generator_states[-1] += 1 self.w( self.indent() +keyword + "(($generator_state[%d]==%d)||($generator_state[%d]<%d&&(" % (\ len(self.generator_states)-1, self.generator_states[-1], len(self.generator_states)-1, self.generator_states[-1],) + \ self.track_call(self.inline_bool_code(expr), test.lineno)+"))) {") self.w( self.spacing() + "$generator_state[%d]=%d;" % (len(self.generator_states)-1, self.generator_states[-1])) else: if not self.is_generator: self.w( self.indent() + keyword + " {") else: self.generator_states[-1] += 1 self.w( self.indent() + keyword + " if ($generator_state[%d]==0||$generator_state[%d]==%d) {" % (\ len(self.generator_states)-1, len(self.generator_states)-1, self.generator_states[-1], )) self.w( self.spacing() + "$generator_state[%d]=%d;" % (len(self.generator_states)-1, self.generator_states[-1])) if self.is_generator: self.generator_add_state() self.generator_switch_open() self.generator_switch_case(increment=False) if isinstance(consequence, self.ast.Stmt): for child in consequence.nodes: self._stmt(child, current_klass) else: raise TranslationError( "unsupported type (in _if_test)", consequence, self.module_name) if self.is_generator: self.generator_switch_case(increment=True) self.generator_switch_close() self.generator_del_state() self.w( self.dedent() + "}") def _compare(self, node, current_klass): lhs = self.expr(node.expr, current_klass) if len(node.ops) != 1: cmp = [] for op, rhs_node in node.ops: rhsname = self.uniqid("$compare") rhs = self.expr(rhs_node, current_klass) rhs = "(%s = %s)" % (rhsname, rhs) cmp.append(self.compare_code(op, lhs, rhs)) lhs = rhsname return "(%s)" % "&&".join(cmp) raise TranslationError( "only one ops supported (in _compare)", node, self.module_name) op = node.ops[0][0] rhs_node = node.ops[0][1] rhs = self.expr(rhs_node, current_klass) return self.compare_code(op, lhs, rhs) def compare_code(self, op, lhs, 
rhs): if op == "==": if not self.stupid_mode: return self.inline_eq_code(lhs, rhs) if op == "!=": if not self.stupid_mode: return "!"+self.inline_eq_code(lhs, rhs) if op == "<": if not self.stupid_mode: return "(%s == -1)" % self.inline_cmp_code(lhs, rhs) if op == "<=": if not self.stupid_mode: return "(%s < 1)" % self.inline_cmp_code(lhs, rhs) if op == ">": if not self.stupid_mode: return "(%s == 1)" % self.inline_cmp_code(lhs, rhs) if op == ">=": if not self.stupid_mode: return "(((%s)|1) == 1)" % self.inline_cmp_code(lhs, rhs) if op == "in": return rhs + ".__contains__(" + lhs + ")" elif op == "not in": return "!" + rhs + ".__contains__(" + lhs + ")" if op == "is": if self.number_classes: return "@{{op_is}}(%s, %s)" % (lhs, rhs) op = "===" if op == "is not": if self.number_classes: return "!@{{op_is}}(%s, %s)" % (lhs, rhs) op = "!==" return "(" + lhs + " " + op + " " + rhs + ")" def _not(self, node, current_klass): expr = self.expr(node.expr, current_klass) if self.stupid_mode: return "(!(%s))" % expr return "!" + self.inline_bool_code(expr) def _or(self, node, current_klass): if self.stupid_mode: return " || ".join(map(bracket_fn, [self.expr(child, current_klass) for child in node.nodes])) s = self.spacing() expr = "@EXPR@" for e in [self.expr(child, current_klass) for child in node.nodes[:-1]]: v = self.uniqid('$or') self.add_lookup('variable', v, v) bool = self.inline_bool_code("%(v)s=%(e)s" % locals()) expr = expr.replace('@EXPR@', "(%(bool)s?%(v)s:@EXPR@)" % locals()) v = self.uniqid('$or') self.add_lookup('variable', v, v) return expr.replace('@EXPR@', self.expr(node.nodes[-1], current_klass)) expr = ",".join([self.expr(child, current_klass) for child in node.nodes]) return "@{{op_or}}([%s])" % expr def _and(self, node, current_klass): if self.stupid_mode: return " && ".join(map(bracket_fn, [self.expr(child, current_klass) for child in node.nodes])) s = self.spacing() expr = "@EXPR@" for e in [self.expr(child, current_klass) for child in node.nodes[:-1]]: v = self.uniqid('$and') self.add_lookup('variable', v, v) bool = self.inline_bool_code("%(v)s=%(e)s" % locals()) expr = expr.replace('@EXPR@', "(%(bool)s?@EXPR@:%(v)s)" % locals()) v = self.uniqid('$and') self.add_lookup('variable', v, v) return expr.replace('@EXPR@', self.expr(node.nodes[-1], current_klass)) expr = ",".join([self.expr(child, current_klass) for child in node.nodes]) return "@{{op_and}}([%s])" % expr def _for(self, node, current_klass): save_is_generator = self.is_generator if self.is_generator: self.is_generator = self.compiler.walk(node, GeneratorExitVisitor(), walker=GeneratorExitVisitor()).has_yield assign_name = "" assign_tuple = [] iterid = self.uniqid('$iter') iterator_name = "%s_iter" % iterid self.add_lookup('variable', iterator_name, iterator_name) nextval = "%s_nextval" % iterid self.add_lookup('variable', nextval, nextval) gentype = "%s_type" % iterid self.add_lookup('variable', gentype, gentype) array = "%s_array" % iterid self.add_lookup('variable', array, array) loopvar = "%s_idx" % iterid self.add_lookup('variable', loopvar, loopvar) if node.else_: testvar = "%s_test" % iterid self.add_lookup('variable', testvar, testvar) assTestvar = "%s_test = " % iterid else: assTestvar = "" reuse_tuple = "false" if isinstance(node.assign, self.ast.AssName): assign_name = self.add_lookup('variable', node.assign.name, node.assign.name) if node.assign.flags == "OP_ASSIGN": op = "=" elif isinstance(node.assign, self.ast.AssTuple): reuse_tuple = "true" op = "=" i = 0 for child in node.assign: child_name = child.name 
self.add_lookup('variable', child_name, child_name) child_name = self.add_lookup('variable', child_name, child_name) if self.inline_code: assign_tuple.append("""%(child_name)s %(op)s %(nextval)s.__array[%(i)i];""" % locals()) else: assign_tuple.append("""%(child_name)s %(op)s %(nextval)s.$nextval.__array[%(i)i];""" % locals()) i += 1 else: raise TranslationError( "unsupported type (in _for)", node.assign, self.module_name) if isinstance(node.list, self.ast.Name): list_expr = self._name(node.list, current_klass) elif isinstance(node.list, self.ast.Getattr): list_expr = self.attrib_join(self._getattr(node.list, current_klass)) elif isinstance(node.list, self.ast.CallFunc): list_expr = self._callfunc(node.list, current_klass) elif isinstance(node.list, self.ast.Subscript): list_expr = self._subscript(node.list, current_klass) elif isinstance(node.list, self.ast.Const): list_expr = self._const(node.list) elif isinstance(node.list, self.ast.List): list_expr = self._list(node.list, current_klass) elif isinstance(node.list, self.ast.Slice): list_expr = self._slice(node.list, current_klass) elif isinstance(node.list, self.ast.ListComp): list_expr = self._listcomp(node.list, current_klass) elif isinstance(node.list, self.ast.Tuple): list_expr = self._tuple(node.list, current_klass) elif isinstance(node.list, self.ast.Add): list_expr = self._add(node.list, current_klass) else: raise TranslationError( "unsupported type (in _for)", node.list, self.module_name) if not assign_tuple: assign_name = self.add_lookup('variable', assign_name, assign_name) if self.source_tracking: self.stacksize_depth += 1 var_trackstack_size = "$pyjs__trackstack_size_%d" % self.stacksize_depth self.add_lookup('variable', var_trackstack_size, var_trackstack_size) self.w( self.spacing() + "%s=$pyjs.trackstack.length;" % var_trackstack_size) s = self.spacing() if self.inline_code: self.w( """\ %(s)s%(iterator_name)s = """ % locals() + self.track_call("%(list_expr)s" % locals(), node.lineno) + ';') self.w( """\ %(s)sif (typeof (%(array)s = %(iterator_name)s.__array) != 'undefined') { %(s)s\t%(gentype)s = 0; %(s)s} else { %(s)s\t%(iterator_name)s = %(iterator_name)s.__iter__(); %(s)s\t%(gentype)s = typeof (%(array)s = %(iterator_name)s.__array) != 'undefined'? 0 : (typeof %(iterator_name)s.$genfunc == 'function'? 
1 : -1); %(s)s} %(s)s%(loopvar)s = 0;""" % locals()) condition = "typeof (%(nextval)s=(%(gentype)s?(%(gentype)s > 0?%(iterator_name)s.next(true,%(reuse_tuple)s):@{{wrapped_next}}(%(iterator_name)s)):%(array)s[%(loopvar)s++])) != 'undefined'" % locals() else: self.w( """\ %(s)s%(iterator_name)s = """ % locals() + self.track_call("%(list_expr)s" % locals(), node.lineno) + ';') self.w( """\ %(s)s%(nextval)s=@{{__iter_prepare}}(%(iterator_name)s,%(reuse_tuple)s);\ """ % locals()) condition = "typeof(@{{__wrapped_next}}(%(nextval)s).$nextval) != 'undefined'" % locals() self.generator_switch_case(increment=True) if self.is_generator: self.w( self.spacing() + "$generator_state[%d] = 0;" % (len(self.generator_states), )) self.generator_switch_case(increment=True) self.w( self.indent() + "for (;%s($generator_state[%d] > 0 || %s);$generator_state[%d] = 0) {" % (assTestvar, len(self.generator_states), condition, len(self.generator_states), )) else: self.w( self.indent() + """while (%s%s) {""" % (assTestvar, condition)) self.generator_add_state() self.generator_switch_open() self.generator_switch_case(increment=False) if not assign_tuple: if self.inline_code: self.w( self.spacing() + """%(assign_name)s %(op)s %(nextval)s;""" % locals()) else: self.w( self.spacing() + """%(assign_name)s %(op)s %(nextval)s.$nextval;""" % locals()) else: for line in assign_tuple: self.w( self.spacing() + line) for n in node.body.nodes: self._stmt(n, current_klass) self.generator_switch_case(increment=True) self.generator_switch_close() self.generator_del_state() self.w( self.dedent() + "}") if node.else_: self.generator_switch_case(increment=True) self.w( self.indent() + "if (!%(testvar)s) {" % locals()) for n in node.else_.nodes: self._stmt(n, current_klass) self.w( self.dedent() + "}") if self.source_tracking: self.w( """\ %(s)sif ($pyjs.trackstack.length > $pyjs__trackstack_size_%(d)d) { %(s)s\t$pyjs.trackstack = $pyjs.trackstack.slice(0,$pyjs__trackstack_size_%(d)d); %(s)s\t$pyjs.track = $pyjs.trackstack.slice(-1)[0]; %(s)s} %(s)s$pyjs.track.module='%(m)s';""" % {'s': self.spacing(), 'd': self.stacksize_depth, 'm': self.module_name}) self.stacksize_depth -= 1 self.generator_switch_case(increment=True) self.is_generator = save_is_generator def _while(self, node, current_klass): save_is_generator = self.is_generator if self.is_generator: self.is_generator = self.compiler.walk(node, GeneratorExitVisitor(), walker=GeneratorExitVisitor()).has_yield test = self.expr(node.test, current_klass) if self.is_generator: self.generator_switch_case(increment=True) self.generator_reset_state() self.generator_switch_case(increment=True) self.w( self.indent() + "for (;($generator_state[%d] > 0)||(" % (\ (len(self.generator_states),)) + \ self.track_call(self.inline_bool_code(test), node.lineno) + ");$generator_state[%d] = 0) {" % (len(self.generator_states), )) self.generator_add_state() self.generator_switch_open() self.generator_switch_case(increment=False) else: self.w( self.indent() + "while (" + self.track_call(self.inline_bool_code(test), node.lineno) + ") {") if isinstance(node.body, self.ast.Stmt): for child in node.body.nodes: self._stmt(child, current_klass) else: raise TranslationError( "unsupported type (in _while)", node.body, self.module_name) if self.is_generator: self.generator_switch_case(increment=True) self.generator_switch_close() self.generator_del_state() self.w( self.dedent() + "}") self.generator_switch_case(increment=True) self.is_generator = save_is_generator def _const(self, node): if isinstance(node.value, 
int): if not self.number_classes: return str(node.value) self.constant_int[node.value] = 1 return "$constant_int_%s" % str(node.value) elif isinstance(node.value, long): v = str(node.value) if v[-1] == 'L': v = v[:-1] if not self.number_classes: return v self.constant_long[node.value] = 1 return "$constant_long_%s" % v elif isinstance(node.value, float): return str(node.value) elif isinstance(node.value, basestring): v = node.value if isinstance(node.value, unicode): v = v.encode('utf-8') return "'%s'" % escapejs(v) elif node.value is None: return "null" else: raise TranslationError( "unsupported type (in _const)", node, self.module_name) def _unaryadd(self, node, current_klass): if not self.operator_funcs: return "(%s)" % self.expr(node.expr, current_klass) e = self.expr(node.expr, current_klass) v = self.uniqid('$uadd') s = self.spacing() return """(typeof (%(v)s=%(e)s)=='number'? %(s)s\t%(v)s: %(s)s\t@{{op_uadd}}(%(v)s))""" % locals() def _unarysub(self, node, current_klass): if not self.operator_funcs: return "-(%s)" % self.expr(node.expr, current_klass) e = self.expr(node.expr, current_klass) v = self.uniqid('$usub') s = self.spacing() return """(typeof (%(v)s=%(e)s)=='number'? %(s)s\t-%(v)s: %(s)s\t@{{op_usub}}(%(v)s))""" % locals() def _add(self, node, current_klass): if not self.operator_funcs: return "(%s)+(%s)" % (self.expr(node.left, current_klass), self.expr(node.right, current_klass)) e1 = self.expr(node.left, current_klass) e2 = self.expr(node.right, current_klass) v1 = self.uniqid('$add') v2 = self.uniqid('$add') self.add_lookup('variable', v1, v1) self.add_lookup('variable', v2, v2) s = self.spacing() if self.inline_code: return """(typeof (%(v1)s=%(e1)s)==typeof (%(v2)s=%(e2)s) && (typeof %(v1)s=='number'||typeof %(v1)s=='string')? %(s)s\t%(v1)s+%(v2)s: %(s)s\t@{{op_add}}(%(v1)s,%(v2)s))""" % locals() return """@{{__op_add}}(%(v1)s=%(e1)s,%(v2)s=%(e2)s)""" % \ locals() def _sub(self, node, current_klass): if not self.operator_funcs: return "(%s)-(%s)" % (self.expr(node.left, current_klass), self.expr(node.right, current_klass)) e1 = self.expr(node.left, current_klass) e2 = self.expr(node.right, current_klass) v1 = self.uniqid('$sub') v2 = self.uniqid('$sub') self.add_lookup('variable', v1, v1) self.add_lookup('variable', v2, v2) s = self.spacing() if self.inline_code: return """(typeof (%(v1)s=%(e1)s)==typeof (%(v2)s=%(e2)s) && (typeof %(v1)s=='number'||typeof %(v1)s=='string')? %(s)s\t%(v1)s-%(v2)s: %(s)s\t@{{op_sub}}(%(v1)s,%(v2)s))""" % locals() return """@{{__op_sub}}(%(v1)s=%(e1)s,%(v2)s=%(e2)s)""" % \ locals() def _floordiv(self, node, current_klass): if not self.operator_funcs: return "Math.floor(%s/%s)" % (self.expr(node.left, current_klass), self.expr(node.right, current_klass)) e1 = self.expr(node.left, current_klass) e2 = self.expr(node.right, current_klass) v1 = self.uniqid('$floordiv') v2 = self.uniqid('$floordiv') self.add_lookup('variable', v1, v1) self.add_lookup('variable', v2, v2) s = self.spacing() return """(typeof (%(v1)s=%(e1)s)==typeof (%(v2)s=%(e2)s) && typeof %(v1)s=='number' && %(v2)s !== 0? 
%(s)s\tMath.floor(%(v1)s/%(v2)s): %(s)s\t@{{op_floordiv}}(%(v1)s,%(v2)s))""" % locals() def _div(self, node, current_klass): if not self.operator_funcs: return "(%s)/(%s)" % (self.expr(node.left, current_klass), self.expr(node.right, current_klass)) e1 = self.expr(node.left, current_klass) e2 = self.expr(node.right, current_klass) v1 = self.uniqid('$div') v2 = self.uniqid('$div') self.add_lookup('variable', v1, v1) self.add_lookup('variable', v2, v2) s = self.spacing() op_div = 'op_div' if self.future_division else 'op_div' op_div = 'op_truediv' if self.future_division else 'op_div' return """(typeof (%(v1)s=%(e1)s)==typeof (%(v2)s=%(e2)s) && typeof %(v1)s=='number' && %(v2)s !== 0? %(s)s\t%(v1)s/%(v2)s: %(s)s\t@{{%(op_div)s}}(%(v1)s,%(v2)s))""" % locals() def _mul(self, node, current_klass): if not self.operator_funcs: return "(%s)*(%s)" % (self.expr(node.left, current_klass), self.expr(node.right, current_klass)) e1 = self.expr(node.left, current_klass) e2 = self.expr(node.right, current_klass) v1 = self.uniqid('$mul') v2 = self.uniqid('$mul') self.add_lookup('variable', v1, v1) self.add_lookup('variable', v2, v2) s = self.spacing() return """(typeof (%(v1)s=%(e1)s)==typeof (%(v2)s=%(e2)s) && typeof %(v1)s=='number'? %(s)s\t%(v1)s*%(v2)s: %(s)s\t@{{op_mul}}(%(v1)s,%(v2)s))""" % locals() def _mod(self, node, current_klass): if isinstance(node.left, self.ast.Const) and isinstance(node.left.value, StringType): return self.track_call("@{{sprintf}}("+self.expr(node.left, current_klass) + ", " + self.expr(node.right, current_klass)+")", node.lineno) e1 = self.expr(node.left, current_klass) e2 = self.expr(node.right, current_klass) if self.stupid_mode: return "(%(e1)s) %% (%(e2)s)" % locals() v1 = self.uniqid('$mod') v2 = self.uniqid('$mod') self.add_lookup('variable', v1, v1) self.add_lookup('variable', v2, v2) s = self.spacing() if not self.operator_funcs: return """((%(v1)s=%(e1)s)!=null && (%(v2)s=%(e2)s)!=null && typeof %(v1)s=='string'? %(s)s\t@{{sprintf}}(%(v1)s,%(v2)s): %(s)s\t((%(v1)s=%(v1)s%%%(v2)s)<0&&%(v2)s>0?%(v1)s+%(v2)s:%(v1)s))""" % locals() return """(typeof (%(v1)s=%(e1)s)==typeof (%(v2)s=%(e2)s) && typeof %(v1)s=='number'? %(s)s\t((%(v1)s=%(v1)s%%%(v2)s)<0&&%(v2)s>0?%(v1)s+%(v2)s:%(v1)s): %(s)s\t@{{op_mod}}(%(v1)s,%(v2)s))""" % locals() def _power(self, node, current_klass): if not self.operator_funcs: return "Math.pow(%s,%s)" % (self.expr(node.left, current_klass), self.expr(node.right, current_klass)) e1 = self.expr(node.left, current_klass) e2 = self.expr(node.right, current_klass) v1 = self.uniqid('$pow') v2 = self.uniqid('$pow') self.add_lookup('variable', v1, v1) self.add_lookup('variable', v2, v2) s = self.spacing() return """(typeof (%(v1)s=%(e1)s)==typeof (%(v2)s=%(e2)s) && typeof %(v1)s=='number'? 
%(s)s\tMath.pow(%(v1)s,%(v2)s): %(s)s\t@{{op_pow}}(%(v1)s,%(v2)s))""" % locals() def _invert(self, node, current_klass): if not self.operator_funcs or not self.number_classes: return "~(%s)" % self.expr(node.expr, current_klass) return "@{{op_invert}}(%s)" % self.expr(node.expr, current_klass) def _bitshiftleft(self, node, current_klass): if not self.operator_funcs or not self.number_classes: return "(%s)<<(%s)"% (self.expr(node.left, current_klass), self.expr(node.right, current_klass)) return "@{{op_bitshiftleft}}(%s,%s)" % (self.expr(node.left, current_klass), self.expr(node.right, current_klass)) def _bitshiftright(self, node, current_klass): if not self.operator_funcs or not self.number_classes: return "(%s)>>(%s)" % (self.expr(node.left, current_klass), self.expr(node.right, current_klass)) return "@{{op_bitshiftright}}(%s,%s)" % (self.expr(node.left, current_klass), self.expr(node.right, current_klass)) def _bitand(self, node, current_klass): if not self.operator_funcs or not self.number_classes: return "(%s)" % ")&(".join([self.expr(child, current_klass) for child in node.nodes]) if len(node.nodes) == 2: return "@{{op_bitand2}}(%s, %s)" % (self.expr(node.nodes[0], current_klass), self.expr(node.nodes[1], current_klass)) return "@{{op_bitand}}([%s])" % ", ".join([self.expr(child, current_klass) for child in node.nodes]) def _bitxor(self,node, current_klass): if not self.operator_funcs or not self.number_classes: return "(%s)" % ")^(".join([self.expr(child, current_klass) for child in node.nodes]) if len(node.nodes) == 2: return "@{{op_bitxor2}}(%s, %s)" % (self.expr(node.nodes[0], current_klass), self.expr(node.nodes[1], current_klass)) return "@{{op_bitxor}}([%s])" % ", ".join([self.expr(child, current_klass) for child in node.nodes]) def _bitor(self, node, current_klass): if not self.operator_funcs or not self.number_classes: return "(%s)" % ")|(".join([self.expr(child, current_klass) for child in node.nodes]) if len(node.nodes) == 2: return "@{{op_bitor2}}(%s, %s)" % (self.expr(node.nodes[0], current_klass), self.expr(node.nodes[1], current_klass)) return "@{{op_bitor}}([%s])" % ", ".join([self.expr(child, current_klass) for child in node.nodes]) def _subscript(self, node, current_klass): if node.flags == "OP_APPLY": if len(node.subs) == 1: return self.inline_getitem_code(self.expr(node.expr, current_klass), self.expr(node.subs[0], current_klass)) else: raise TranslationError( "must have one sub (in _subscript)", node, self.module_name) else: raise TranslationError( "unsupported flag (in _subscript)", node, self.module_name) def _subscript_stmt(self, node, current_klass): if node.flags == "OP_DELETE": self.w( self.spacing() + self.track_call(self.expr(node.expr, current_klass) + ".__delitem__(" + self.expr(node.subs[0], current_klass) + ")", node.lineno) + ';') else: raise TranslationError( "unsupported flag (in _subscript)", node, self.module_name) def _assattr(self, node, current_klass): attr_name = self.attrib_remap(node.attrname) lhs = self._lhsFromAttr(node, current_klass) if node.flags == "OP_DELETE": self.w( self.spacing() + "@{{delattr}}(%s, '%s');" % (lhs, attr_name)) else: raise TranslationError( "unsupported flag (in _assign)", v, self.module_name) def _assname(self, node, current_klass): name_type, pyname, jsname, depth, is_local = self.lookup(node.name) if node.flags == "OP_DELETE": self.w( self.spacing() + "delete %s;" % (jsname,)) else: raise TranslationError( "unsupported flag (in _assign)", v, self.module_name) def _list(self, node, current_klass): return 
self.track_call("$p['list']([" + ", ".join([self.expr(x, current_klass) for x in node.nodes]) + "])", node.lineno) def _dict(self, node, current_klass): items = [] for x in node.items: key = self.expr(x[0], current_klass) value = self.expr(x[1], current_klass) items.append("[" + key + ", " + value + "]") return self.track_call("$p['dict']([" + ", ".join(items) + "])") def _tuple(self, node, current_klass): return self.track_call("$p['tuple']([" + ", ".join([self.expr(x, current_klass) for x in node.nodes]) + "])", node.lineno) def _lambda(self, node, current_klass): save_local_prefix, self.local_prefix = self.local_prefix, None save_is_class_definition, self.is_class_definition = self.is_class_definition, False function_name = self.uniqid("$lambda") self.w( self.spacing() + "var", False) code_node = self.ast.Stmt([self.ast.Return(node.code, node.lineno)], node.lineno) try: # python2.N func_node = self.ast.Function(None, function_name, node.argnames, node.defaults, node.flags, None, code_node, node.lineno) except: # lib2to3 func_node = self.ast.Function(None, function_name, node.argnames, node.defaults, node.varargs, node.kwargs, None, code_node, node.lineno) self._function(func_node, current_klass, True) self.local_prefix = save_local_prefix self.is_class_definition = save_is_class_definition return function_name def _listcomp(self, node, current_klass): self.push_lookup() resultlist = self.uniqid("$listcomp") self.add_lookup('variable', resultlist, resultlist) save_output = self.output self.output = StringIO() tnode = self.ast.Discard(self.ast.CallFunc(self.ast.Getattr(self.ast.Name(resultlist), 'append'), [node.expr], None, None)) for qual in node.quals[::-1]: if len(qual.ifs) > 1: raise TranslationError( "unsupported ifs (in _listcomp)", node, self.module_name) tassign = qual.assign tlist = qual.list tbody = self.ast.Stmt([tnode]) if len(qual.ifs) == 1: tbody = self.ast.Stmt([self.ast.If([(qual.ifs[0].test, tbody)], None, qual.ifs[0].lineno)]) telse_ = None tnode = self.ast.For(tassign, tlist, tbody, telse_, node.lineno) self._for(tnode, current_klass) captured_output = self.output self.output = save_output listcomp_code = """\ function(){ \t%s \t%s = $p['list'](); %s \treturn %s;}()""" % ( self.local_js_vars_decl([]), resultlist, captured_output.getvalue(), resultlist, ) self.pop_lookup() return listcomp_code def _genexpr(self, node, current_klass): save_has_yield = self.has_yield self.has_yield = True save_is_generator = self.is_generator self.is_generator = True save_generator_states = self.generator_states self.generator_states = [0] self.state_max_depth = len(self.generator_states) self.push_options() self.source_tracking = self.debug = False if not isinstance(node.code, self.ast.GenExprInner): raise TranslationError( "unsupported code (in _genexpr)", node, self.module_name) if node.argnames != ['.0']: raise TranslationError( "argnames not supported (in _genexpr)", node, self.module_name) if node.kwargs: raise TranslationError( "kwargs not supported (in _genexpr)", node, self.module_name) if node.varargs: raise TranslationError( "varargs not supported (in _genexpr)", node, self.module_name) save_output = self.output self.output = StringIO() self.indent() self.generator_switch_open() self.generator_switch_case(increment=False) tnode = self.ast.Yield(node.code.expr, node.lineno) for qual in node.code.quals[::-1]: if isinstance(qual, self.ast.GenExprFor): if len(qual.ifs) > 1: raise TranslationError( "unsupported ifs (in _genexpr)", node.code, self.module_name) tassign = qual.assign 
titer = qual.iter tbody = self.ast.Stmt([tnode]) tis_outmost = qual.is_outmost if len(qual.ifs) == 1: tbody = self.ast.Stmt([self.ast.If([(qual.ifs[0].test, tbody)], None, qual.ifs[0].lineno)]) telse_ = None tnode = self.ast.For(tassign, titer, tbody, telse_, node.lineno) self._for(tnode, current_klass) else: raise TranslationError( "unsupported quals (in _genexpr)", node.code, self.module_name) self.generator_switch_case(increment=True) self.generator_switch_close() captured_output = self.output.getvalue() self.output = StringIO() self.w( "function(){") self.generator(captured_output) self.w( self.dedent() + "}()") captured_output = self.output.getvalue() self.output = save_output self.generator_states = save_generator_states self.state_max_depth = len(self.generator_states) self.is_generator = save_is_generator self.has_yield = save_has_yield self.pop_options() return captured_output def _slice(self, node, current_klass): lower = "0" upper = "null" if node.lower != None: lower = self.expr(node.lower, current_klass) if node.upper != None: upper = self.expr(node.upper, current_klass) if node.flags == "OP_APPLY": return "@{{slice}}(" + self.expr(node.expr, current_klass) + ", " + lower + ", " + upper + ")" elif node.flags == "OP_DELETE": return "@{{__delslice}}(" + self.expr(node.expr, current_klass) + ", " + lower + ", " + upper + ");" else: raise TranslationError( "unsupported flag (in _slice)", node, self.module_name) def _global(self, node, current_klass): for name in node.names: name_type, pyname, jsname, depth, is_local = self.lookup(name) if name_type is None: # Not defined yet. name_type = 'variable' pyname = name jsname = self.scopeName(name, depth, is_local) else: name_type = 'global' self.add_lookup(name_type, pyname, jsname) def _if_expr(self, node, current_klass): test = self.expr(node.test, current_klass) then = self.expr(node.then, current_klass) else_ = self.expr(node.else_, current_klass) return "(" + self.inline_bool_code(test) + "? 
(%(then)s) : (%(else_)s))" % locals() def _backquote(self, node, current_klass): return "@{{repr}}(%s)" % self.expr(node.expr, current_klass) def expr(self, node, current_klass): if isinstance(node, self.ast.Const): return self._const(node) # @@@ not sure if the parentheses should be here or in individual operator functions - JKT elif isinstance(node, self.ast.Mul): return self._mul(node, current_klass) elif isinstance(node, self.ast.Add): return self._add(node, current_klass) elif isinstance(node, self.ast.Sub): return self._sub(node, current_klass) elif isinstance(node, self.ast.Div): return self._div(node, current_klass) elif isinstance(node, self.ast.FloorDiv): return self._floordiv(node, current_klass) elif isinstance(node, self.ast.Mod): return self._mod(node, current_klass) elif isinstance(node, self.ast.Power): return self._power(node, current_klass) elif isinstance(node, self.ast.UnaryAdd): return self._unaryadd(node, current_klass) elif isinstance(node, self.ast.UnarySub): return self._unarysub(node, current_klass) elif isinstance(node, self.ast.Not): return self._not(node, current_klass) elif isinstance(node, self.ast.Or): return self._or(node, current_klass) elif isinstance(node, self.ast.And): return self._and(node, current_klass) elif isinstance(node, self.ast.Invert): return self._invert(node, current_klass) elif isinstance(node,self.ast.LeftShift): return self._bitshiftleft(node, current_klass) elif isinstance(node, self.ast.RightShift): return self._bitshiftright(node, current_klass) elif isinstance(node, self.ast.Bitand): return self._bitand(node, current_klass) elif isinstance(node, self.ast.Bitxor): return self._bitxor(node, current_klass) elif isinstance(node, self.ast.Bitor): return self._bitor(node, current_klass) elif isinstance(node, self.ast.Compare): return self._compare(node, current_klass) elif isinstance(node, self.ast.CallFunc): return self._callfunc(node, current_klass, optlocal_var=True) elif isinstance(node, self.ast.Name): return self._name(node, current_klass, optlocal_var=True) elif isinstance(node, self.ast.Subscript): return self._subscript(node, current_klass) elif isinstance(node, self.ast.Getattr): attr_ = self._getattr(node, current_klass) if len(attr_) == 1: return attr_[0] attr = self.attrib_join(attr_) attr_left = self.attrib_join(attr_[:-1]) attr_right = attr_[-1] attrstr = attr v = self.uniqid('$attr') vl = self.uniqid('$attr') self.add_lookup('variable', v, v) self.add_lookup('variable', vl, vl) if self.bound_methods or self.descriptors: getattr_condition = """(%(v)s=(%(vl)s=%(attr_left)s)['%(attr_right)s']) == null || ((%(vl)s.__is_instance__) && typeof %(v)s == 'function')""" if self.descriptors: getattr_condition += """ || (typeof %(v)s['__get__'] == 'function')""" attr_code = """\ (""" + getattr_condition + """? \t@{{getattr}}(%(vl)s, '%(attr_right)s'): \t%(attr)s)\ """ attr_code = ('\n'+self.spacing()+"\t\t").join(attr_code.split('\n')) else: attr_code = "%(attr)s" attr_code = attr_code % locals() s = self.spacing() orig_attr = attr if not self.attribute_checking: attr = attr_code else: if attr.find('(') < 0 and not self.debug: attrstr = attr.replace("\n", "\n\\") attr = """(typeof %(attr)s=='undefined'? 
%(s)s\t\t(function(){throw TypeError("%(attrstr)s is undefined");})(): %(s)s\t\t%(attr_code)s)""" % locals() else: attr_ = attr if self.source_tracking or self.debug: _source_tracking = self.source_tracking _debug = self.debug _attribute_checking = self.attribute_checking self.attribute_checking = self.source_tracking = self.debug = False attr_ = self.attrib_join(self._getattr(node, current_klass)) self.source_tracking = _source_tracking self.debug = _debug self.attribute_checking = _attribute_checking attrstr = attr_.replace("\n", "\\\n") attr = """(function(){ %(s)s\tvar $pyjs__testval=%(attr_code)s; %(s)s\treturn (typeof $pyjs__testval=='undefined'? %(s)s\t\t(function(){throw TypeError(\"%(attrstr)s is undefined");})(): %(s)s\t\t$pyjs__testval); %(s)s})()""" % locals() if True: # not self.attribute_checking or self.inline_code: return attr bound_methods = self.bound_methods and "true" or "false" descriptors = self.descriptors and "true" or "false" attribute_checking = self.attribute_checking and "true" or "false" source_tracking = self.source_tracking and "true" or "false" attr = """\ @{{__getattr_check}}(%(attr)s, %(attr_left)s, %(attr_right)s,\ "%(attrstr)s", %(bound_methods)s, %(descriptors)s, %(attribute_checking)s,\ %(source_tracking)s) """ % locals() return attr elif isinstance(node, self.ast.List): return self._list(node, current_klass) elif isinstance(node, self.ast.Dict): return self._dict(node, current_klass) elif isinstance(node, self.ast.Tuple): return self._tuple(node, current_klass) elif isinstance(node, self.ast.Slice): return self._slice(node, current_klass) elif isinstance(node, self.ast.Lambda): return self._lambda(node, current_klass) elif isinstance(node, self.ast.ListComp): return self._listcomp(node, current_klass) elif isinstance(node, self.ast.IfExp): return self._if_expr(node, current_klass) elif isinstance(node, self.ast.Yield): return self._yield_expr(node, current_klass) elif isinstance(node, self.ast.Backquote): return self._backquote(node, current_klass) elif isinstance(node, self.ast.GenExpr): return self._genexpr(node, current_klass) else: raise TranslationError( "unsupported type (in expr)", node, self.module_name) def import_compiler(internal_ast): if internal_ast: from lib2to3 import compiler else: import compiler return compiler def translate(compiler, sources, output_file, module_name=None, **kw): kw = dict(all_compile_options, **kw) list_imports = kw.get('list_imports', False) sources = map(os.path.abspath, sources) if not module_name: module_name, extension = os.path.splitext(os.path.basename(sources[0])) trees = [] tree= None for src in sources: current_tree = compiler.parseFile(src) flags = set() f = file(src) for l in f: if l.startswith('#@PYJS_'): flags.add(l.strip()[7:]) f.close() if tree: tree = merge(compiler.ast, module_name, tree, current_tree, flags) else: tree = current_tree #XXX: if we have an override the sourcefile and the tree is not the same! 
f = file(sources[0], "r") src = f.read() f.close() if list_imports: v = ImportVisitor(module_name) compiler.walk(tree, v) return v.imported_modules, v.imported_js if output_file == '-': output = sys.stdout else: output = file(output_file, 'w') t = Translator(compiler, module_name, sources[0], src, tree, output, **kw) output.close() return t.imported_modules, t.imported_js def merge(ast, module_name, tree1, tree2, flags): if 'FULL_OVERRIDE' in flags: return tree2 for child in tree2.node: if isinstance(child, ast.Function): replaceFunction(ast, module_name, tree1, child.name, child) elif isinstance(child, ast.Class): replaceClassMethods(ast, module_name, tree1, child.name, child) else: raise TranslationError( "Do not know how to merge %s" % child, child, module_name) return tree1 def replaceFunction(ast, module_name, tree, function_name, function_node): # find function to replace for child in tree.node: if isinstance(child, ast.Function) and child.name == function_name: copyFunction(child, function_node) return raise TranslationError( "function not found: " + function_name, function_node, module_name) def copyFunction(target, source): target.code = source.code target.argnames = source.argnames target.defaults = source.defaults target.doc = source.doc # @@@ not sure we need to do this any more def addCode(target, source): target.nodes.append(source) def replaceClassMethods(ast, module_name, tree, class_name, class_node): # find class to replace old_class_node = None for child in tree.node: if isinstance(child, ast.Class) and child.name == class_name: old_class_node = child break if not old_class_node: raise TranslationError( "class not found: " + class_name, class_node, module_name) # replace methods for node in class_node.code: if isinstance(node, ast.Function): found = False for child in old_class_node.code: if isinstance(child, ast.Function) and child.name == node.name: found = True copyFunction(child, node) break if not found: raise TranslationError( "class method not found: " + class_name + "." 
+ node.name, node, module_name) elif isinstance(node, ast.Assign) and \ isinstance(node.nodes[0], ast.AssName): found = False for child in old_class_node.code: if isinstance(child, ast.Assign) and \ eqNodes(child.nodes, node.nodes): found = True copyAssign(child, node) if not found: addCode(old_class_node.code, node) elif isinstance(node, ast.Pass): pass else: raise TranslationError( "Do not know how to merge %s" % node, node, self.module_name) class PlatformParser: def __init__(self, compiler, platform_dir = "", verbose=True, chain_plat=None): self.platform_dir = platform_dir self.parse_cache = {} self.platform = "" self.verbose = verbose self.chain_plat = chain_plat self.compiler = compiler def setPlatform(self, platform): self.platform = platform def parseModule(self, module_name, file_name): importing = False if not self.parse_cache.has_key(file_name): importing = True if self.chain_plat: mod, override = self.chain_plat.parseModule(module_name, file_name) else: mod = self.compiler.parseFile(file_name) self.parse_cache[file_name] = mod else: mod = self.parse_cache[file_name] override = False platform_file_name = self.generatePlatformFilename(file_name) if self.platform and os.path.isfile(platform_file_name): mod = copy.deepcopy(mod) mod_override = self.compiler.parseFile(platform_file_name) if self.verbose: print "Merging", module_name, self.platform self.merge(smod, mod_override) override = True if self.verbose: if override: print "Importing %s (Platform %s)" % (module_name, self.platform) elif importing: print "Importing %s" % (module_name) return mod, override def generatePlatformFilename(self, file_name): (module_name, extension) = os.path.splitext(os.path.basename(file_name)) platform_file_name = module_name + self.platform + extension return os.path.join(os.path.dirname(file_name), self.platform_dir, platform_file_name) def replaceFunction(self, tree, function_name, function_node): # find function to replace for child in tree.node: if isinstance(child, self.ast.Function) and child.name == function_name: self.copyFunction(child, function_node) return raise TranslationError( "function not found: " + function_name, function_node, self.module_name) def replaceClassMethods(self, tree, class_name, class_node): # find class to replace old_class_node = None for child in tree.node: if isinstance(child, self.ast.Class) and child.name == class_name: old_class_node = child break if not old_class_node: raise TranslationError( "class not found: " + class_name, class_node, self.module_name) # replace methods for node in class_node.code: if isinstance(node, self.ast.Function): found = False for child in old_class_node.code: if isinstance(child, self.ast.Function) and child.name == node.name: found = True self.copyFunction(child, node) break if not found: raise TranslationError( "class method not found: " + class_name + "." 
+ node.name, node, self.module_name) elif isinstance(node, self.ast.Assign) and \ isinstance(node.nodes[0], self.ast.AssName): found = False for child in old_class_node.code: if isinstance(child, self.ast.Assign) and \ self.eqNodes(child.nodes, node.nodes): found = True self.copyAssign(child, node) if not found: self.addCode(old_class_node.code, node) elif isinstance(node, self.ast.Pass): pass else: raise TranslationError( "Do not know how to merge %s" % node, node, self.module_name) def copyFunction(self, target, source): target.code = source.code target.argnames = source.argnames target.defaults = source.defaults target.doc = source.doc # @@@ not sure we need to do this any more def copyAssign(self, target, source): target.nodes = source.nodes target.expr = source.expr target.lineno = source.lineno return def eqNodes(self, nodes1, nodes2): return str(nodes1) == str(nodes2) def dotreplace(fname): path, ext = os.path.splitext(fname) return path.replace(".", "/") + ext class ImportVisitor(object): def __init__(self, module_name): self.module_name = module_name self.imported_modules = [] self.imported_js = [] def add_imported_module(self, importName): if not importName in self.imported_modules: self.imported_modules.append(importName) def visitModule(self, node): self.visit(node.node) def visitImport(self, node): self._doImport(node.names) def _doImport(self, names): for importName, importAs in names: if importName == '__pyjamas__': continue if importName.endswith(".js"): continue imp.add_imported_js(importName) continue self.add_imported_module(importName) def visitFrom(self, node): if node.modname == '__pyjamas__': return if node.modname == '__javascript__': return # XXX: hack for in-function checking, we should have another # object to check our scope absPath = False modname = node.modname if hasattr(node, 'level') and node.level > 0: absPath = True modname = self.module_name.split('.') level = node.level if len(modname) < level: raise TranslationError( "Attempted relative import beyond toplevel package", node, self.module_name) if node.modname != '': level += 1 if level > 1: modname = '.'.join(modname[:-(node.level-1)]) else: modname = self.module_name if node.modname != '': modname += '.' + node.modname if modname[0] == '.': modname = modname[1:] for name in node.names: sub = modname + '.' 
+ name[0] ass_name = name[1] or name[0] self._doImport(((sub, ass_name),)) class AppTranslator: def __init__(self, compiler, library_dirs=[], parser=None, dynamic=False, verbose=True, debug=False, print_statements=True, function_argument_checking=True, attribute_checking=True, bound_methods=True, descriptors=True, source_tracking=True, line_tracking=True, store_source=True, inline_code=False, operator_funcs=True, number_classes=True, ): self.compiler = compiler self.extension = ".py" self.print_statements = print_statements self.library_modules = [] self.overrides = {} self.library_dirs = path + library_dirs self.dynamic = dynamic self.verbose = verbose self.debug = debug self.print_statements = print_statements self.function_argument_checking = function_argument_checking self.attribute_checking = attribute_checking self.bound_methods = bound_methods self.descriptors = descriptors self.source_tracking = source_tracking self.line_tracking = line_tracking self.store_source = store_source self.inline_code = inline_code self.operator_funcs = operator_funcs self.number_classes = number_classes if not parser: self.parser = PlatformParser(self.compiler) else: self.parser = parser self.parser.dynamic = dynamic def findFile(self, file_name): if os.path.isfile(file_name): return file_name for library_dir in self.library_dirs: file_name = dotreplace(file_name) full_file_name = os.path.join( LIBRARY_PATH, library_dir, file_name) if os.path.isfile(full_file_name): return full_file_name fnameinit, ext = os.path.splitext(file_name) fnameinit = fnameinit + "/__init__.py" full_file_name = os.path.join( LIBRARY_PATH, library_dir, fnameinit) if os.path.isfile(full_file_name): return full_file_name raise Exception("file not found: " + file_name) def _translate(self, module_name, debug=False): self.library_modules.append(module_name) file_name = self.findFile(module_name + self.extension) output = StringIO() f = file(file_name, "r") src = f.read() f.close() mod, override = self.parser.parseModule(module_name, file_name) if override: override_name = "%s.%s" % (self.parser.platform.lower(), module_name) self.overrides[override_name] = override_name t = Translator(self.compiler, module_name, file_name, src, mod, output, self.dynamic, self.findFile, debug = self.debug, print_statements = self.print_statements, function_argument_checking = self.function_argument_checking, attribute_checking = self.attribute_checking, bound_methods = self.bound_methods, descriptors = self.descriptors, source_tracking = self.source_tracking, line_tracking = self.line_tracking, store_source = self.store_source, inline_code = self.inline_code, operator_funcs = self.operator_funcs, number_classes = self.number_classes, ) module_str = output.getvalue() imported_modules_str = "" for module in t.imported_modules: if module not in self.library_modules: self.library_modules.append(module) return imported_modules_str + module_str def translate(self, module_name, is_app=True, debug=False, library_modules=[]): app_code = StringIO() lib_code = StringIO() imported_js = [] self.library_modules = [] self.overrides = {} for library in library_modules: if library.endswith(".js"): imported_js.append(library) continue self.library_modules.append(library) if self.verbose: print 'Including LIB', library print >> lib_code, '\n//\n// BEGIN LIB '+library+'\n//\n' print >> lib_code, self._translate( library, False, debug=debug, imported_js=imported_js) print >> lib_code, "/* initialize static library */" print >> lib_code, "%s();\n" % library print >> 
lib_code, '\n//\n// END LIB '+library+'\n//\n' if module_name: print >> app_code, self._translate( module_name, is_app, debug=debug, imported_js=imported_js) for js in imported_js: path = self.findFile(js) if os.path.isfile(path): if self.verbose: print 'Including JS', js print >> lib_code, '\n//\n// BEGIN JS '+js+'\n//\n' print >> lib_code, file(path).read() print >> lib_code, '\n//\n// END JS '+js+'\n//\n' else: print >>sys.stderr, 'Warning: Unable to find imported javascript:', js return lib_code.getvalue(), app_code.getvalue()
anandology/pyjamas
pyjs/src/pyjs/translator_proto.py
Python
apache-2.0
192,944
[ "VisIt" ]
313b0d269fb9c91f4b01a00f84a3c255bb96f9727abbe1f54f87866b560fe3aa
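The translator_proto.py record above is essentially one large dispatch table: _stmt() and expr() inspect the type of each compiler AST node and hand it to a per-node emitter (_assign, _for, _add, ...) that returns or writes a JavaScript string. Below is a minimal, self-contained sketch of that dispatch-by-node-type pattern, assuming the modern ast module rather than the legacy compiler package the file relies on; the names (TinyJsEmitter, to_js) and the tiny subset of handled nodes are illustrative only, not pyjamas' real API.

import ast

class TinyJsEmitter(ast.NodeVisitor):
    # Map Python binary-operator node types to their JavaScript spellings.
    BINOPS = {ast.Add: '+', ast.Sub: '-', ast.Mult: '*', ast.Div: '/'}

    def visit_Module(self, node):
        # One JS statement per top-level Python statement.
        return "\n".join(self.visit(stmt) for stmt in node.body)

    def visit_Expr(self, node):
        return self.visit(node.value) + ";"

    def visit_BinOp(self, node):
        op = self.BINOPS.get(type(node.op))
        if op is None:
            raise NotImplementedError(type(node.op).__name__)
        return "(%s %s %s)" % (self.visit(node.left), op, self.visit(node.right))

    def visit_Name(self, node):
        return node.id

    def visit_Constant(self, node):
        return repr(node.value)

def to_js(source):
    # Parse the Python source and emit a JavaScript-flavoured string.
    return TinyJsEmitter().visit(ast.parse(source))

print(to_js("a + b * 2"))   # -> (a + (b * 2));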
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView

urlpatterns = [
    url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name="home"),
    url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name="about"),

    # Django Admin
    url(r'^admin/', include(admin.site.urls)),

    # User management
    url(r'^users/', include("repork_project.users.urls", namespace="users")),
    url(r'^accounts/', include('allauth.urls')),

    # Your stuff: custom urls includes go here

] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)

if settings.DEBUG:
    # This allows the error pages to be debugged during development, just visit
    # these url in browser to see how these error pages look like.
    urlpatterns += [
        url(r'^400/$', 'django.views.defaults.bad_request'),
        url(r'^403/$', 'django.views.defaults.permission_denied'),
        url(r'^404/$', 'django.views.defaults.page_not_found'),
        url(r'^500/$', 'django.views.defaults.server_error'),
    ]
jvosk/repork
config/urls.py
Python
bsd-3-clause
1,236
[ "VisIt" ]
58acb4214d2a9f76a880e83de8520334e3d429c5c419b7f99aa079e9e5d50502
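The urls.py record above names every route ("home", "about", the namespaced users include), so the rest of the project can look paths up instead of hard-coding them. A small, hypothetical usage sketch follows; it assumes a settings module whose ROOT_URLCONF points at the urlpatterns shown, and it uses the django.core.urlresolvers import path that matches the Django generation this urlconf targets. The nav_links helper is an illustration, not part of the project.

from django.core.urlresolvers import reverse

def nav_links():
    # "home" and "about" are the names given to the two TemplateView routes above.
    return {name: reverse(name) for name in ("home", "about")}

# Expected result against the urlconf above: {'home': '/', 'about': '/about/'}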
# Copyright (c) 2015 Ansible, Inc. # All Rights Reserved. import os import re # noqa import sys from datetime import timedelta # global settings from django.conf import global_settings # ugettext lazy from django.utils.translation import ugettext_lazy as _ # Update this module's local settings from the global settings module. this_module = sys.modules[__name__] for setting in dir(global_settings): if setting == setting.upper(): setattr(this_module, setting, getattr(global_settings, setting)) # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(__file__)) def is_testing(argv=None): import sys '''Return True if running django or py.test unit tests.''' if 'PYTEST_CURRENT_TEST' in os.environ.keys(): return True argv = sys.argv if argv is None else argv if len(argv) >= 1 and ('py.test' in argv[0] or 'py/test.py' in argv[0]): return True elif len(argv) >= 2 and argv[1] == 'test': return True return False def IS_TESTING(argv=None): return is_testing(argv) if "pytest" in sys.modules: from unittest import mock with mock.patch('__main__.__builtins__.dir', return_value=[]): import ldap else: import ldap DEBUG = True SQL_DEBUG = DEBUG DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'awx.sqlite3'), 'ATOMIC_REQUESTS': True, 'TEST': { # Test database cannot be :memory: for inventory tests. 'NAME': os.path.join(BASE_DIR, 'awx_test.sqlite3'), }, } } AWX_CONTAINER_GROUP_K8S_API_TIMEOUT = 10 AWX_CONTAINER_GROUP_POD_LAUNCH_RETRIES = 100 AWX_CONTAINER_GROUP_POD_LAUNCH_RETRY_DELAY = 5 AWX_CONTAINER_GROUP_DEFAULT_NAMESPACE = 'default' AWX_CONTAINER_GROUP_DEFAULT_IMAGE = 'ansible/ansible-runner' # Internationalization # https://docs.djangoproject.com/en/dev/topics/i18n/ # # Local time zone for this installation. Choices can be found here: # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name # although not all choices may be available on all operating systems. # On Unix systems, a value of None will cause Django to use the same # timezone as the operating system. # If running in a Windows environment this must be set to the same as your # system time zone. TIME_ZONE = 'UTC' # Language code for this installation. All choices can be found here: # http://www.i18nguy.com/unicode/language-identifiers.html LANGUAGE_CODE = 'en-us' # If you set this to False, Django will make some optimizations so as not # to load the internationalization machinery. USE_I18N = True # If you set this to False, Django will not format dates, numbers and # calendars according to the current locale USE_L10N = True USE_TZ = True STATICFILES_DIRS = ( os.path.join(BASE_DIR, 'ui', 'static'), os.path.join(BASE_DIR, 'ui_next', 'build', 'static'), os.path.join(BASE_DIR, 'static'), ) # Absolute filesystem path to the directory where static file are collected via # the collectstatic command. STATIC_ROOT = os.path.join(BASE_DIR, 'public', 'static') # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/dev/howto/static-files/ STATIC_URL = '/static/' # Absolute filesystem path to the directory that will hold user-uploaded files. # Example: "/home/media/media.lawrence.com/" MEDIA_ROOT = os.path.join(BASE_DIR, 'public', 'media') # URL that handles the media served from MEDIA_ROOT. Make sure to use a # trailing slash if there is a path component (optional in other cases). 
# Examples: "http://media.lawrence.com", "http://example.com/media/" MEDIA_URL = '/media/' LOGIN_URL = '/api/login/' # Absolute filesystem path to the directory to host projects (with playbooks). # This directory should not be web-accessible. PROJECTS_ROOT = os.path.join(BASE_DIR, 'projects') # Absolute filesystem path to the directory to host collections for # running inventory imports, isolated playbooks AWX_ANSIBLE_COLLECTIONS_PATHS = os.path.join(BASE_DIR, 'vendor', 'awx_ansible_collections') # Absolute filesystem path to the directory for job status stdout (default for # development and tests, default for production defined in production.py). This # directory should not be web-accessible JOBOUTPUT_ROOT = os.path.join(BASE_DIR, 'job_output') # Absolute filesystem path to the directory to store logs LOG_ROOT = os.path.join(BASE_DIR) # The heartbeat file for the tower scheduler SCHEDULE_METADATA_LOCATION = os.path.join(BASE_DIR, '.tower_cycle') # Django gettext files path: locale/<lang-code>/LC_MESSAGES/django.po, django.mo LOCALE_PATHS = ( os.path.join(BASE_DIR, 'locale'), ) # Graph of resources that can have named-url NAMED_URL_GRAPH = {} # Maximum number of the same job that can be waiting to run when launching from scheduler # Note: This setting may be overridden by database settings. SCHEDULE_MAX_JOBS = 10 SITE_ID = 1 # Make this unique, and don't share it with anybody. SECRET_KEY = 'p7z7g1ql4%6+(6nlebb6hdk7sd^&fnjpal308%n%+p^_e6vo1y' # Hosts/domain names that are valid for this site; required if DEBUG is False # See https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts ALLOWED_HOSTS = [] # HTTP headers and meta keys to search to determine remote host name or IP. Add # additional items to this list, such as "HTTP_X_FORWARDED_FOR", if behind a # reverse proxy. REMOTE_HOST_HEADERS = ['REMOTE_ADDR', 'REMOTE_HOST'] # If Tower is behind a reverse proxy/load balancer, use this setting to # allow the proxy IP addresses from which Tower should trust custom # REMOTE_HOST_HEADERS header values # REMOTE_HOST_HEADERS = ['HTTP_X_FORWARDED_FOR', ''REMOTE_ADDR', 'REMOTE_HOST'] # PROXY_IP_ALLOWED_LIST = ['10.0.1.100', '10.0.1.101'] # If this setting is an empty list (the default), the headers specified by # REMOTE_HOST_HEADERS will be trusted unconditionally') PROXY_IP_ALLOWED_LIST = [] CUSTOM_VENV_PATHS = [] # Note: This setting may be overridden by database settings. STDOUT_MAX_BYTES_DISPLAY = 1048576 # Returned in the header on event api lists as a recommendation to the UI # on how many events to display before truncating/hiding MAX_UI_JOB_EVENTS = 4000 # Returned in index.html, tells the UI if it should make requests # to update job data in response to status changes websocket events UI_LIVE_UPDATES_ENABLED = True # The maximum size of the ansible callback event's res data structure # beyond this limit and the value will be removed MAX_EVENT_RES_DATA = 700000 # Note: This setting may be overridden by database settings. 
EVENT_STDOUT_MAX_BYTES_DISPLAY = 1024 # The amount of time before a stdout file is expired and removed locally # Note that this can be recreated if the stdout is downloaded LOCAL_STDOUT_EXPIRE_TIME = 2592000 # The number of processes spawned by the callback receiver to process job # events into the database JOB_EVENT_WORKERS = 4 # The maximum size of the job event worker queue before requests are blocked JOB_EVENT_MAX_QUEUE_SIZE = 10000 # The number of job events to migrate per-transaction when moving from int -> bigint JOB_EVENT_MIGRATION_CHUNK_SIZE = 1000000 # Disallow sending session cookies over insecure connections SESSION_COOKIE_SECURE = True # Seconds before sessions expire. # Note: This setting may be overridden by database settings. SESSION_COOKIE_AGE = 1800 # Maximum number of per-user valid, concurrent sessions. # -1 is unlimited # Note: This setting may be overridden by database settings. SESSIONS_PER_USER = -1 CSRF_USE_SESSIONS = False # Disallow sending csrf cookies over insecure connections CSRF_COOKIE_SECURE = True # Limit CSRF cookies to browser sessions CSRF_COOKIE_AGE = None TEMPLATES = [ { 'NAME': 'default', 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'OPTIONS': { 'debug': DEBUG, 'context_processors': [# NOQA 'django.contrib.auth.context_processors.auth', 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.template.context_processors.i18n', 'django.template.context_processors.media', 'django.template.context_processors.static', 'django.template.context_processors.tz', 'django.contrib.messages.context_processors.messages', 'awx.ui.context_processors.settings', 'awx.ui.context_processors.version', 'social_django.context_processors.backends', 'social_django.context_processors.login_redirect', ], 'loaders': [( 'django.template.loaders.cached.Loader', ('django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader',), )], 'builtins': ['awx.main.templatetags.swagger'], }, 'DIRS': [ os.path.join(BASE_DIR, 'templates'), os.path.join(BASE_DIR, 'ui_next', 'build'), ], }, ] ROOT_URLCONF = 'awx.urls' WSGI_APPLICATION = 'awx.wsgi.application' INSTALLED_APPS = [ 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.messages', 'django.contrib.sessions', 'django.contrib.sites', 'django.contrib.staticfiles', 'oauth2_provider', 'rest_framework', 'django_extensions', 'channels', 'polymorphic', 'taggit', 'social_django', 'corsheaders', 'awx.conf', 'awx.main', 'awx.api', 'awx.ui', 'awx.sso', 'solo' ] INTERNAL_IPS = ('127.0.0.1',) MAX_PAGE_SIZE = 200 REST_FRAMEWORK = { 'DEFAULT_PAGINATION_CLASS': 'awx.api.pagination.Pagination', 'PAGE_SIZE': 25, 'DEFAULT_AUTHENTICATION_CLASSES': ( 'awx.api.authentication.LoggedOAuth2Authentication', 'awx.api.authentication.SessionAuthentication', 'awx.api.authentication.LoggedBasicAuthentication', ), 'DEFAULT_PERMISSION_CLASSES': ( 'awx.api.permissions.ModelAccessPermission', ), 'DEFAULT_FILTER_BACKENDS': ( 'awx.api.filters.TypeFilterBackend', 'awx.api.filters.FieldLookupBackend', 'rest_framework.filters.SearchFilter', 'awx.api.filters.OrderByBackend', ), 'DEFAULT_PARSER_CLASSES': ( 'awx.api.parsers.JSONParser', ), 'DEFAULT_RENDERER_CLASSES': ( 'awx.api.renderers.DefaultJSONRenderer', 'awx.api.renderers.BrowsableAPIRenderer', ), 'DEFAULT_METADATA_CLASS': 'awx.api.metadata.Metadata', 'EXCEPTION_HANDLER': 'awx.api.views.api_exception_handler', 'VIEW_DESCRIPTION_FUNCTION': 'awx.api.generics.get_view_description', 'NON_FIELD_ERRORS_KEY': '__all__', 
'DEFAULT_VERSION': 'v2', # For swagger schema generation # see https://github.com/encode/django-rest-framework/pull/6532 'DEFAULT_SCHEMA_CLASS': 'rest_framework.schemas.AutoSchema', #'URL_FORMAT_OVERRIDE': None, } AUTHENTICATION_BACKENDS = ( 'awx.sso.backends.LDAPBackend', 'awx.sso.backends.LDAPBackend1', 'awx.sso.backends.LDAPBackend2', 'awx.sso.backends.LDAPBackend3', 'awx.sso.backends.LDAPBackend4', 'awx.sso.backends.LDAPBackend5', 'awx.sso.backends.RADIUSBackend', 'awx.sso.backends.TACACSPlusBackend', 'social_core.backends.google.GoogleOAuth2', 'social_core.backends.github.GithubOAuth2', 'social_core.backends.github.GithubOrganizationOAuth2', 'social_core.backends.github.GithubTeamOAuth2', 'social_core.backends.azuread.AzureADOAuth2', 'awx.sso.backends.SAMLAuth', 'django.contrib.auth.backends.ModelBackend', ) # Django OAuth Toolkit settings OAUTH2_PROVIDER_APPLICATION_MODEL = 'main.OAuth2Application' OAUTH2_PROVIDER_ACCESS_TOKEN_MODEL = 'main.OAuth2AccessToken' OAUTH2_PROVIDER_REFRESH_TOKEN_MODEL = 'oauth2_provider.RefreshToken' OAUTH2_PROVIDER = {'ACCESS_TOKEN_EXPIRE_SECONDS': 31536000000, 'AUTHORIZATION_CODE_EXPIRE_SECONDS': 600, 'REFRESH_TOKEN_EXPIRE_SECONDS': 2628000} ALLOW_OAUTH2_FOR_EXTERNAL_USERS = False # LDAP server (default to None to skip using LDAP authentication). # Note: This setting may be overridden by database settings. AUTH_LDAP_SERVER_URI = None # Disable LDAP referrals by default (to prevent certain LDAP queries from # hanging with AD). # Note: This setting may be overridden by database settings. AUTH_LDAP_CONNECTION_OPTIONS = { ldap.OPT_REFERRALS: 0, ldap.OPT_NETWORK_TIMEOUT: 30 } # Radius server settings (default to empty string to skip using Radius auth). # Note: These settings may be overridden by database settings. RADIUS_SERVER = '' RADIUS_PORT = 1812 RADIUS_SECRET = '' # TACACS+ settings (default host to empty string to skip using TACACS+ auth). # Note: These settings may be overridden by database settings. TACACSPLUS_HOST = '' TACACSPLUS_PORT = 49 TACACSPLUS_SECRET = '' TACACSPLUS_SESSION_TIMEOUT = 5 TACACSPLUS_AUTH_PROTOCOL = 'ascii' # Enable / Disable HTTP Basic Authentication used in the API browser # Note: Session limits are not enforced when using HTTP Basic Authentication. # Note: This setting may be overridden by database settings. AUTH_BASIC_ENABLED = True # If set, specifies a URL that unauthenticated users will be redirected to # when trying to access a UI page that requries authentication. LOGIN_REDIRECT_OVERRIDE = '' # Default to skipping isolated host key checking (the initial connection will # hang on an interactive "The authenticity of host example.org can't be # established" message) AWX_ISOLATED_HOST_KEY_CHECKING = False # The number of seconds to sleep between status checks for jobs running on isolated nodes AWX_ISOLATED_CHECK_INTERVAL = 30 # The timeout (in seconds) for launching jobs on isolated nodes AWX_ISOLATED_LAUNCH_TIMEOUT = 600 # Ansible connection timeout (in seconds) for communicating with isolated instances AWX_ISOLATED_CONNECTION_TIMEOUT = 10 # The time (in seconds) between the periodic isolated heartbeat status check AWX_ISOLATED_PERIODIC_CHECK = 600 DEVSERVER_DEFAULT_ADDR = '0.0.0.0' DEVSERVER_DEFAULT_PORT = '8013' # Set default ports for live server tests. 
os.environ.setdefault('DJANGO_LIVE_TEST_SERVER_ADDRESS', 'localhost:9013-9199') BROKER_URL = 'unix:///var/run/redis/redis.sock' CELERYBEAT_SCHEDULE = { 'tower_scheduler': { 'task': 'awx.main.tasks.awx_periodic_scheduler', 'schedule': timedelta(seconds=30), 'options': {'expires': 20,} }, 'cluster_heartbeat': { 'task': 'awx.main.tasks.cluster_node_heartbeat', 'schedule': timedelta(seconds=60), 'options': {'expires': 50,} }, 'gather_analytics': { 'task': 'awx.main.tasks.gather_analytics', 'schedule': timedelta(minutes=5) }, 'task_manager': { 'task': 'awx.main.scheduler.tasks.run_task_manager', 'schedule': timedelta(seconds=20), 'options': {'expires': 20} }, 'k8s_reaper': { 'task': 'awx.main.tasks.awx_k8s_reaper', 'schedule': timedelta(seconds=60), 'options': {'expires': 50,} }, # 'isolated_heartbeat': set up at the end of production.py and development.py } # Django Caching Configuration DJANGO_REDIS_IGNORE_EXCEPTIONS = True CACHES = { 'default': { 'BACKEND': 'django_redis.cache.RedisCache', 'LOCATION': 'unix:/var/run/redis/redis.sock?db=1' }, } # Social Auth configuration. SOCIAL_AUTH_STRATEGY = 'social_django.strategy.DjangoStrategy' SOCIAL_AUTH_STORAGE = 'social_django.models.DjangoStorage' SOCIAL_AUTH_USER_MODEL = AUTH_USER_MODEL # noqa _SOCIAL_AUTH_PIPELINE_BASE = ( 'social_core.pipeline.social_auth.social_details', 'social_core.pipeline.social_auth.social_uid', 'social_core.pipeline.social_auth.auth_allowed', 'social_core.pipeline.social_auth.social_user', 'social_core.pipeline.user.get_username', 'social_core.pipeline.social_auth.associate_by_email', 'social_core.pipeline.user.create_user', 'awx.sso.pipeline.check_user_found_or_created', 'social_core.pipeline.social_auth.associate_user', 'social_core.pipeline.social_auth.load_extra_data', 'awx.sso.pipeline.set_is_active_for_new_user', 'social_core.pipeline.user.user_details', 'awx.sso.pipeline.prevent_inactive_login', ) SOCIAL_AUTH_PIPELINE = _SOCIAL_AUTH_PIPELINE_BASE + ( 'awx.sso.pipeline.update_user_orgs', 'awx.sso.pipeline.update_user_teams', ) SOCIAL_AUTH_SAML_PIPELINE = _SOCIAL_AUTH_PIPELINE_BASE + ( 'awx.sso.pipeline.update_user_orgs_by_saml_attr', 'awx.sso.pipeline.update_user_teams_by_saml_attr', 'awx.sso.pipeline.update_user_orgs', 'awx.sso.pipeline.update_user_teams', ) SOCIAL_AUTH_LOGIN_URL = '/' SOCIAL_AUTH_LOGIN_REDIRECT_URL = '/sso/complete/' SOCIAL_AUTH_LOGIN_ERROR_URL = '/sso/error/' SOCIAL_AUTH_INACTIVE_USER_URL = '/sso/inactive/' SOCIAL_AUTH_RAISE_EXCEPTIONS = False SOCIAL_AUTH_USERNAME_IS_FULL_EMAIL = False #SOCIAL_AUTH_SLUGIFY_USERNAMES = True SOCIAL_AUTH_CLEAN_USERNAMES = True SOCIAL_AUTH_SANITIZE_REDIRECTS = True SOCIAL_AUTH_REDIRECT_IS_HTTPS = False # Note: These settings may be overridden by database settings. 
SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = '' SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = '' SOCIAL_AUTH_GOOGLE_OAUTH2_SCOPE = ['profile'] SOCIAL_AUTH_GITHUB_KEY = '' SOCIAL_AUTH_GITHUB_SECRET = '' SOCIAL_AUTH_GITHUB_SCOPE = ['user:email', 'read:org'] SOCIAL_AUTH_GITHUB_ORG_KEY = '' SOCIAL_AUTH_GITHUB_ORG_SECRET = '' SOCIAL_AUTH_GITHUB_ORG_NAME = '' SOCIAL_AUTH_GITHUB_ORG_SCOPE = ['user:email', 'read:org'] SOCIAL_AUTH_GITHUB_TEAM_KEY = '' SOCIAL_AUTH_GITHUB_TEAM_SECRET = '' SOCIAL_AUTH_GITHUB_TEAM_ID = '' SOCIAL_AUTH_GITHUB_TEAM_SCOPE = ['user:email', 'read:org'] SOCIAL_AUTH_AZUREAD_OAUTH2_KEY = '' SOCIAL_AUTH_AZUREAD_OAUTH2_SECRET = '' SOCIAL_AUTH_SAML_SP_ENTITY_ID = '' SOCIAL_AUTH_SAML_SP_PUBLIC_CERT = '' SOCIAL_AUTH_SAML_SP_PRIVATE_KEY = '' SOCIAL_AUTH_SAML_ORG_INFO = {} SOCIAL_AUTH_SAML_TECHNICAL_CONTACT = {} SOCIAL_AUTH_SAML_SUPPORT_CONTACT = {} SOCIAL_AUTH_SAML_ENABLED_IDPS = {} SOCIAL_AUTH_SAML_ORGANIZATION_ATTR = {} SOCIAL_AUTH_SAML_TEAM_ATTR = {} # Any ANSIBLE_* settings will be passed to the task runner subprocess # environment # Do not want AWX to ask interactive questions and want it to be friendly with # reprovisioning ANSIBLE_HOST_KEY_CHECKING = False # RHEL has too old of an SSH so ansible will select paramiko and this is VERY # slow. ANSIBLE_PARAMIKO_RECORD_HOST_KEYS = False # Force ansible in color even if we don't have a TTY so we can properly colorize # output ANSIBLE_FORCE_COLOR = True # If tmp generated inventory parsing fails (error state), fail playbook fast ANSIBLE_INVENTORY_UNPARSED_FAILED = True # Additional environment variables to be passed to the ansible subprocesses AWX_TASK_ENV = {} # Rebuild Host Smart Inventory memberships. AWX_REBUILD_SMART_MEMBERSHIP = False # By default, allow arbitrary Jinja templating in extra_vars defined on a Job Template ALLOW_JINJA_IN_EXTRA_VARS = 'template' # Run project updates with extra verbosity PROJECT_UPDATE_VVV = False # Enable dynamically pulling roles from a requirement.yml file # when updating SCM projects # Note: This setting may be overridden by database settings. AWX_ROLES_ENABLED = True # Enable dynamically pulling collections from a requirement.yml file # when updating SCM projects # Note: This setting may be overridden by database settings. AWX_COLLECTIONS_ENABLED = True # Follow symlinks when scanning for playbooks AWX_SHOW_PLAYBOOK_LINKS = False # Settings for primary galaxy server, should be set in the UI PRIMARY_GALAXY_URL = '' PRIMARY_GALAXY_USERNAME = '' PRIMARY_GALAXY_TOKEN = '' PRIMARY_GALAXY_PASSWORD = '' PRIMARY_GALAXY_AUTH_URL = '' # Settings for the public galaxy server(s). PUBLIC_GALAXY_ENABLED = True PUBLIC_GALAXY_SERVER = { 'id': 'galaxy', 'url': 'https://galaxy.ansible.com' } # Applies to any galaxy server GALAXY_IGNORE_CERTS = False # List of dicts of fallback (additional) Galaxy servers. If configured, these # will be higher precedence than public Galaxy, but lower than primary Galaxy. # Available options: 'id', 'url', 'username', 'password', 'token', 'auth_url' FALLBACK_GALAXY_SERVERS = [] # Enable bubblewrap support for running jobs (playbook runs only). # Note: This setting may be overridden by database settings. AWX_PROOT_ENABLED = True # Command/path to bubblewrap. AWX_PROOT_CMD = 'bwrap' # Additional paths to hide from jobs using bubblewrap. # Note: This setting may be overridden by database settings. AWX_PROOT_HIDE_PATHS = [] # Additional paths to show for jobs using bubbelwrap. # Note: This setting may be overridden by database settings. 
AWX_PROOT_SHOW_PATHS = [] # The directory in which Tower will create new temporary directories for job # execution and isolation (such as credential files and custom # inventory scripts). # Note: This setting may be overridden by database settings. AWX_PROOT_BASE_PATH = "/tmp" # Disable resource profiling by default AWX_RESOURCE_PROFILING_ENABLED = False # Interval (in seconds) between polls for cpu usage AWX_RESOURCE_PROFILING_CPU_POLL_INTERVAL = '0.25' # Interval (in seconds) between polls for memory usage AWX_RESOURCE_PROFILING_MEMORY_POLL_INTERVAL = '0.25' # Interval (in seconds) between polls for PID count AWX_RESOURCE_PROFILING_PID_POLL_INTERVAL = '0.25' # User definable ansible callback plugins # Note: This setting may be overridden by database settings. AWX_ANSIBLE_CALLBACK_PLUGINS = "" # Automatically remove nodes that have missed their heartbeats after some time AWX_AUTO_DEPROVISION_INSTANCES = False # Enable Pendo on the UI, possible values are 'off', 'anonymous', and 'detailed' # Note: This setting may be overridden by database settings. PENDO_TRACKING_STATE = "off" # Enables Insights data collection for Ansible Tower. # Note: This setting may be overridden by database settings. INSIGHTS_TRACKING_STATE = False # Last gather date for Analytics AUTOMATION_ANALYTICS_LAST_GATHER = None # Default list of modules allowed for ad hoc commands. # Note: This setting may be overridden by database settings. AD_HOC_COMMANDS = [ 'command', 'shell', 'yum', 'apt', 'apt_key', 'apt_repository', 'apt_rpm', 'service', 'group', 'user', 'mount', 'ping', 'selinux', 'setup', 'win_ping', 'win_service', 'win_updates', 'win_group', 'win_user', ] INV_ENV_VARIABLE_BLOCKED = ("HOME", "USER", "_", "TERM") # ---------------- # -- Amazon EC2 -- # ---------------- # AWS does not appear to provide pretty region names via any API, so store the # list of names here. The available region IDs will be pulled from boto. # http://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region EC2_REGION_NAMES = { 'us-east-1': _('US East (Northern Virginia)'), 'us-east-2': _('US East (Ohio)'), 'us-west-2': _('US West (Oregon)'), 'us-west-1': _('US West (Northern California)'), 'ca-central-1': _('Canada (Central)'), 'eu-central-1': _('EU (Frankfurt)'), 'eu-west-1': _('EU (Ireland)'), 'eu-west-2': _('EU (London)'), 'ap-southeast-1': _('Asia Pacific (Singapore)'), 'ap-southeast-2': _('Asia Pacific (Sydney)'), 'ap-northeast-1': _('Asia Pacific (Tokyo)'), 'ap-northeast-2': _('Asia Pacific (Seoul)'), 'ap-south-1': _('Asia Pacific (Mumbai)'), 'sa-east-1': _('South America (Sao Paulo)'), 'us-gov-west-1': _('US West (GovCloud)'), 'cn-north-1': _('China (Beijing)'), } # Inventory variable name/values for determining if host is active/enabled. EC2_ENABLED_VAR = 'ec2_state' EC2_ENABLED_VALUE = 'running' # Inventory variable name containing unique instance ID. EC2_INSTANCE_ID_VAR = 'ec2_id' # Filter for allowed group/host names when importing inventory from EC2. EC2_GROUP_FILTER = r'^.+$' EC2_HOST_FILTER = r'^.+$' EC2_EXCLUDE_EMPTY_GROUPS = True # ------------ # -- VMware -- # ------------ # Inventory variable name/values for determining whether a host is # active in vSphere. VMWARE_ENABLED_VAR = 'guest.gueststate' VMWARE_ENABLED_VALUE = 'running' # Inventory variable name containing the unique instance ID. VMWARE_INSTANCE_ID_VAR = 'config.instanceUuid, config.instanceuuid' # Filter for allowed group and host names when importing inventory # from VMware. 
VMWARE_GROUP_FILTER = r'^.+$' VMWARE_HOST_FILTER = r'^.+$' VMWARE_EXCLUDE_EMPTY_GROUPS = True VMWARE_VALIDATE_CERTS = False # --------------------------- # -- Google Compute Engine -- # --------------------------- # It's not possible to get zones in GCE without authenticating, so we # provide a list here. # Source: https://developers.google.com/compute/docs/zones GCE_REGION_CHOICES = [ ('us-east1-b', _('US East 1 (B)')), ('us-east1-c', _('US East 1 (C)')), ('us-east1-d', _('US East 1 (D)')), ('us-east4-a', _('US East 4 (A)')), ('us-east4-b', _('US East 4 (B)')), ('us-east4-c', _('US East 4 (C)')), ('us-central1-a', _('US Central (A)')), ('us-central1-b', _('US Central (B)')), ('us-central1-c', _('US Central (C)')), ('us-central1-f', _('US Central (F)')), ('us-west1-a', _('US West (A)')), ('us-west1-b', _('US West (B)')), ('us-west1-c', _('US West (C)')), ('europe-west1-b', _('Europe West 1 (B)')), ('europe-west1-c', _('Europe West 1 (C)')), ('europe-west1-d', _('Europe West 1 (D)')), ('europe-west2-a', _('Europe West 2 (A)')), ('europe-west2-b', _('Europe West 2 (B)')), ('europe-west2-c', _('Europe West 2 (C)')), ('asia-east1-a', _('Asia East (A)')), ('asia-east1-b', _('Asia East (B)')), ('asia-east1-c', _('Asia East (C)')), ('asia-southeast1-a', _('Asia Southeast (A)')), ('asia-southeast1-b', _('Asia Southeast (B)')), ('asia-northeast1-a', _('Asia Northeast (A)')), ('asia-northeast1-b', _('Asia Northeast (B)')), ('asia-northeast1-c', _('Asia Northeast (C)')), ('australia-southeast1-a', _('Australia Southeast (A)')), ('australia-southeast1-b', _('Australia Southeast (B)')), ('australia-southeast1-c', _('Australia Southeast (C)')), ] # Inventory variable name/value for determining whether a host is active # in Google Compute Engine. GCE_ENABLED_VAR = 'status' GCE_ENABLED_VALUE = 'running' # Filter for allowed group and host names when importing inventory from # Google Compute Engine. GCE_GROUP_FILTER = r'^.+$' GCE_HOST_FILTER = r'^.+$' GCE_EXCLUDE_EMPTY_GROUPS = True GCE_INSTANCE_ID_VAR = 'gce_id' # -------------------------------------- # -- Microsoft Azure Resource Manager -- # -------------------------------------- # It's not possible to get zones in Azure without authenticating, so we # provide a list here. 
AZURE_RM_REGION_CHOICES = [ ('eastus', _('US East')), ('eastus2', _('US East 2')), ('centralus', _('US Central')), ('northcentralus', _('US North Central')), ('southcentralus', _('US South Central')), ('westcentralus', _('US West Central')), ('westus', _('US West')), ('westus2', _('US West 2')), ('canadaeast', _('Canada East')), ('canadacentral', _('Canada Central')), ('brazilsouth', _('Brazil South')), ('northeurope', _('Europe North')), ('westeurope', _('Europe West')), ('ukwest', _('UK West')), ('uksouth', _('UK South')), ('eastasia', _('Asia East')), ('southestasia', _('Asia Southeast')), ('australiaeast', _('Australia East')), ('australiasoutheast', _('Australia Southeast')), ('westindia', _('India West')), ('southindia', _('India South')), ('japaneast', _('Japan East')), ('japanwest', _('Japan West')), ('koreacentral', _('Korea Central')), ('koreasouth', _('Korea South')), ] AZURE_RM_GROUP_FILTER = r'^.+$' AZURE_RM_HOST_FILTER = r'^.+$' AZURE_RM_ENABLED_VAR = 'powerstate' AZURE_RM_ENABLED_VALUE = 'running' AZURE_RM_INSTANCE_ID_VAR = 'id' AZURE_RM_EXCLUDE_EMPTY_GROUPS = True # --------------------- # ----- OpenStack ----- # --------------------- OPENSTACK_ENABLED_VAR = 'status' OPENSTACK_ENABLED_VALUE = 'ACTIVE' OPENSTACK_GROUP_FILTER = r'^.+$' OPENSTACK_HOST_FILTER = r'^.+$' OPENSTACK_EXCLUDE_EMPTY_GROUPS = True OPENSTACK_INSTANCE_ID_VAR = 'openstack.id' # --------------------- # ----- oVirt4 ----- # --------------------- RHV_ENABLED_VAR = 'status' RHV_ENABLED_VALUE = 'up' RHV_GROUP_FILTER = r'^.+$' RHV_HOST_FILTER = r'^.+$' RHV_EXCLUDE_EMPTY_GROUPS = True RHV_INSTANCE_ID_VAR = 'id' # --------------------- # ----- Tower ----- # --------------------- TOWER_ENABLED_VAR = 'remote_tower_enabled' TOWER_ENABLED_VALUE = 'true' TOWER_GROUP_FILTER = r'^.+$' TOWER_HOST_FILTER = r'^.+$' TOWER_EXCLUDE_EMPTY_GROUPS = True TOWER_INSTANCE_ID_VAR = 'remote_tower_id' # --------------------- # ----- Foreman ----- # --------------------- SATELLITE6_ENABLED_VAR = 'foreman.enabled' SATELLITE6_ENABLED_VALUE = 'True' SATELLITE6_GROUP_FILTER = r'^.+$' SATELLITE6_HOST_FILTER = r'^.+$' SATELLITE6_EXCLUDE_EMPTY_GROUPS = True SATELLITE6_INSTANCE_ID_VAR = 'foreman.id' # SATELLITE6_GROUP_PREFIX and SATELLITE6_GROUP_PATTERNS defined in source vars # --------------------- # ----- Custom ----- # --------------------- #CUSTOM_ENABLED_VAR = #CUSTOM_ENABLED_VALUE = CUSTOM_GROUP_FILTER = r'^.+$' CUSTOM_HOST_FILTER = r'^.+$' CUSTOM_EXCLUDE_EMPTY_GROUPS = False #CUSTOM_INSTANCE_ID_VAR = # --------------------- # ----- SCM ----- # --------------------- #SCM_ENABLED_VAR = #SCM_ENABLED_VALUE = SCM_GROUP_FILTER = r'^.+$' SCM_HOST_FILTER = r'^.+$' SCM_EXCLUDE_EMPTY_GROUPS = False #SCM_INSTANCE_ID_VAR = # --------------------- # -- Activity Stream -- # --------------------- # Defaults for enabling/disabling activity stream. # Note: These settings may be overridden by database settings. ACTIVITY_STREAM_ENABLED = True ACTIVITY_STREAM_ENABLED_FOR_INVENTORY_SYNC = False CALLBACK_QUEUE = "callback_tasks" # Note: This setting may be overridden by database settings. ORG_ADMINS_CAN_SEE_ALL_USERS = True MANAGE_ORGANIZATION_AUTH = True # Note: This setting may be overridden by database settings. 
TOWER_URL_BASE = "https://towerhost" INSIGHTS_URL_BASE = "https://example.org" INSIGHTS_AGENT_MIME = 'application/example' TOWER_SETTINGS_MANIFEST = {} # Settings related to external logger configuration LOG_AGGREGATOR_ENABLED = False LOG_AGGREGATOR_TCP_TIMEOUT = 5 LOG_AGGREGATOR_VERIFY_CERT = True LOG_AGGREGATOR_LEVEL = 'INFO' LOG_AGGREGATOR_MAX_DISK_USAGE_GB = 1 LOG_AGGREGATOR_MAX_DISK_USAGE_PATH = '/var/lib/awx' LOG_AGGREGATOR_RSYSLOGD_DEBUG = False # The number of retry attempts for websocket session establishment # If you're encountering issues establishing websockets in clustered Tower, # raising this value can help CHANNEL_LAYER_RECEIVE_MAX_RETRY = 10 ASGI_APPLICATION = "awx.main.routing.application" CHANNEL_LAYERS = { "default": { "BACKEND": "awx.main.consumers.ExpiringRedisChannelLayer", "CONFIG": { "hosts": [BROKER_URL], "capacity": 10000, "group_expiry": 157784760, # 5 years }, }, } # Logging configuration. LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'filters': { 'require_debug_false': { '()': 'django.utils.log.RequireDebugFalse', }, 'require_debug_true': { '()': 'django.utils.log.RequireDebugTrue', }, 'require_debug_true_or_test': { '()': 'awx.main.utils.RequireDebugTrueOrTest', }, 'external_log_enabled': { '()': 'awx.main.utils.filters.ExternalLoggerEnabled' }, 'dynamic_level_filter': { '()': 'awx.main.utils.filters.DynamicLevelFilter' } }, 'formatters': { 'simple': { 'format': '%(asctime)s %(levelname)-8s %(name)s %(message)s', }, 'json': { '()': 'awx.main.utils.formatters.LogstashFormatter' }, 'timed_import': { '()': 'awx.main.utils.formatters.TimeFormatter', 'format': '%(relativeSeconds)9.3f %(levelname)-8s %(message)s' }, 'dispatcher': { 'format': '%(asctime)s %(levelname)-8s %(name)s PID:%(process)d %(message)s', }, }, 'handlers': { 'console': { '()': 'logging.StreamHandler', 'level': 'DEBUG', 'filters': ['require_debug_true_or_test'], 'formatter': 'simple', }, 'null': { 'class': 'logging.NullHandler', }, 'file': { 'class': 'logging.NullHandler', 'formatter': 'simple', }, 'syslog': { 'level': 'WARNING', 'filters': ['require_debug_false'], 'class': 'logging.NullHandler', 'formatter': 'simple', }, 'external_logger': { 'class': 'awx.main.utils.handlers.RSysLogHandler', 'formatter': 'json', 'address': '/var/run/awx-rsyslog/rsyslog.sock', 'filters': ['external_log_enabled', 'dynamic_level_filter'], }, 'tower_warnings': { # don't define a level here, it's set by settings.LOG_AGGREGATOR_LEVEL 'class': 'logging.handlers.RotatingFileHandler', 'filters': ['require_debug_false', 'dynamic_level_filter'], 'filename': os.path.join(LOG_ROOT, 'tower.log'), 'maxBytes': 1024 * 1024 * 5, # 5 MB 'backupCount': 5, 'formatter':'simple', }, 'callback_receiver': { # don't define a level here, it's set by settings.LOG_AGGREGATOR_LEVEL 'class': 'logging.handlers.RotatingFileHandler', 'filters': ['require_debug_false', 'dynamic_level_filter'], 'filename': os.path.join(LOG_ROOT, 'callback_receiver.log'), 'maxBytes': 1024 * 1024 * 5, # 5 MB 'backupCount': 5, 'formatter':'simple', }, 'dispatcher': { # don't define a level here, it's set by settings.LOG_AGGREGATOR_LEVEL 'class': 'logging.handlers.RotatingFileHandler', 'filters': ['require_debug_false', 'dynamic_level_filter'], 'filename': os.path.join(LOG_ROOT, 'dispatcher.log'), 'maxBytes': 1024 * 1024 * 5, # 5 MB 'backupCount': 5, 'formatter':'dispatcher', }, 'wsbroadcast': { # don't define a level here, it's set by settings.LOG_AGGREGATOR_LEVEL 'class': 'logging.handlers.RotatingFileHandler', 'filters': ['require_debug_false', 
'dynamic_level_filter'], 'filename': os.path.join(LOG_ROOT, 'wsbroadcast.log'), 'maxBytes': 1024 * 1024 * 5, # 5 MB 'backupCount': 5, 'formatter':'simple', }, 'celery.beat': { 'class':'logging.StreamHandler', 'level': 'ERROR' }, # don't log every celerybeat wakeup 'inventory_import': { 'level': 'DEBUG', 'class':'logging.StreamHandler', 'formatter': 'timed_import', }, 'task_system': { # don't define a level here, it's set by settings.LOG_AGGREGATOR_LEVEL 'class': 'logging.handlers.RotatingFileHandler', 'filters': ['require_debug_false', 'dynamic_level_filter'], 'filename': os.path.join(LOG_ROOT, 'task_system.log'), 'maxBytes': 1024 * 1024 * 5, # 5 MB 'backupCount': 5, 'formatter':'simple', }, 'management_playbooks': { 'level': 'DEBUG', 'class':'logging.handlers.RotatingFileHandler', 'filters': ['require_debug_false'], 'filename': os.path.join(LOG_ROOT, 'management_playbooks.log'), 'maxBytes': 1024 * 1024 * 5, # 5 MB 'backupCount': 5, 'formatter':'simple', }, 'system_tracking_migrations': { 'level': 'WARNING', 'class':'logging.handlers.RotatingFileHandler', 'filters': ['require_debug_false'], 'filename': os.path.join(LOG_ROOT, 'tower_system_tracking_migrations.log'), 'maxBytes': 1024 * 1024 * 5, # 5 MB 'backupCount': 5, 'formatter':'simple', }, 'rbac_migrations': { 'level': 'WARNING', 'class':'logging.handlers.RotatingFileHandler', 'filters': ['require_debug_false'], 'filename': os.path.join(LOG_ROOT, 'tower_rbac_migrations.log'), 'maxBytes': 1024 * 1024 * 5, # 5 MB 'backupCount': 5, 'formatter':'simple', }, }, 'loggers': { 'django': { 'handlers': ['console'], }, 'django.request': { 'handlers': ['console', 'file', 'tower_warnings'], 'level': 'WARNING', }, 'daphne': { 'handlers': ['console', 'file', 'tower_warnings'], 'level': 'INFO', }, 'rest_framework.request': { 'handlers': ['console', 'file', 'tower_warnings'], 'level': 'WARNING', 'propagate': False, }, 'py.warnings': { 'handlers': ['console'], }, 'awx': { 'handlers': ['console', 'file', 'tower_warnings', 'external_logger'], 'level': 'DEBUG', }, 'awx.conf': { 'handlers': ['null'], 'level': 'WARNING', }, 'awx.conf.settings': { 'handlers': ['null'], 'level': 'WARNING', }, 'awx.main': { 'handlers': ['null'] }, 'awx.main.commands.run_callback_receiver': { 'handlers': ['callback_receiver'], # level handled by dynamic_level_filter }, 'awx.main.dispatch': { 'handlers': ['dispatcher'], }, 'awx.main.consumers': { 'handlers': ['console', 'file', 'tower_warnings'], 'level': 'INFO', }, 'awx.main.wsbroadcast': { 'handlers': ['wsbroadcast'], }, 'awx.isolated.manager.playbooks': { 'handlers': ['management_playbooks'], 'propagate': False }, 'awx.main.commands.inventory_import': { 'handlers': ['inventory_import'], 'propagate': False }, 'awx.main.tasks': { 'handlers': ['task_system', 'external_logger'], 'propagate': False }, 'awx.main.scheduler': { 'handlers': ['task_system', 'external_logger'], 'propagate': False }, 'awx.main.access': { 'level': 'INFO', # very verbose debug-level logs }, 'awx.main.signals': { 'level': 'INFO', # very verbose debug-level logs }, 'awx.api.permissions': { 'level': 'INFO', # very verbose debug-level logs }, 'awx.analytics': { 'handlers': ['external_logger'], 'level': 'INFO', 'propagate': False }, 'django_auth_ldap': { 'handlers': ['console', 'file', 'tower_warnings'], 'level': 'DEBUG', }, 'social': { 'handlers': ['console', 'file', 'tower_warnings'], 'level': 'DEBUG', }, 'system_tracking_migrations': { 'handlers': ['console', 'file', 'tower_warnings'], 'level': 'DEBUG', }, 'rbac_migrations': { 'handlers': ['console', 'file', 
'tower_warnings'], 'level': 'DEBUG', }, } } # Apply coloring to messages logged to the console COLOR_LOGS = False # https://github.com/django-polymorphic/django-polymorphic/issues/195 # FIXME: Disabling models.E006 warning until we can renamed Project and InventorySource SILENCED_SYSTEM_CHECKS = ['models.E006'] # Use middleware to get request statistics AWX_REQUEST_PROFILE = False # # Optionally, AWX can generate DOT graphs # (http://www.graphviz.org/doc/info/lang.html) for per-request profiling # via gprof2dot (https://github.com/jrfonseca/gprof2dot) # # If you set this to True, you must `/var/lib/awx/venv/awx/bin/pip install gprof2dot` # .dot files will be saved in `/var/log/tower/profile/` and can be converted e.g., # # ~ yum install graphviz # ~ dot -o profile.png -Tpng /var/log/tower/profile/some-profile-data.dot # AWX_REQUEST_PROFILE_WITH_DOT = False # Allow profiling callback workers via SIGUSR1 AWX_CALLBACK_PROFILE = False # Delete temporary directories created to store playbook run-time AWX_CLEANUP_PATHS = True MIDDLEWARE = [ 'awx.main.middleware.TimingMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'awx.main.middleware.MigrationRanCheckMiddleware', 'corsheaders.middleware.CorsMiddleware', 'django.middleware.locale.LocaleMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'awx.sso.middleware.SocialAuthMiddleware', 'crum.CurrentRequestUserMiddleware', 'awx.main.middleware.URLModificationMiddleware', 'awx.main.middleware.SessionTimeoutMiddleware', ] # Secret header value to exchange for websockets responsible for distributing websocket messages. # This needs to be kept secret and randomly generated BROADCAST_WEBSOCKET_SECRET = '' # Port for broadcast websockets to connect to # Note: that the clients will follow redirect responses BROADCAST_WEBSOCKET_PORT = 443 # Whether or not broadcast websockets should check nginx certs when interconnecting BROADCAST_WEBSOCKET_VERIFY_CERT = False # Connect to other AWX nodes using http or https BROADCAST_WEBSOCKET_PROTOCOL = 'https' # All websockets that connect to the broadcast websocket endpoint will be put into this group BROADCAST_WEBSOCKET_GROUP_NAME = 'broadcast-group_send' # Time wait before retrying connecting to a websocket broadcast tower node BROADCAST_WEBSOCKET_RECONNECT_RETRY_RATE_SECONDS = 5 # How often websocket process will look for changes in the Instance table BROADCAST_WEBSOCKET_NEW_INSTANCE_POLL_RATE_SECONDS = 10 # How often websocket process will generate stats BROADCAST_WEBSOCKET_STATS_POLL_RATE_SECONDS = 5
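The LOGGING dictionary near the end of this settings module declares several `logging.handlers.RotatingFileHandler` entries (tower.log, dispatcher.log, and so on), each capped at 5 MB with five backups. A minimal, stand-alone sketch of how one such handler behaves once `dictConfig()` processes it; the file name and logger name below are illustrative and not taken from AWX:

    import logging
    import logging.config

    LOGGING = {
        'version': 1,
        'disable_existing_loggers': False,
        'formatters': {
            'simple': {'format': '%(asctime)s %(levelname)-8s %(name)s %(message)s'},
        },
        'handlers': {
            'rotating_file': {
                'class': 'logging.handlers.RotatingFileHandler',
                'filename': 'demo.log',            # illustrative path
                'maxBytes': 1024 * 1024 * 5,       # 5 MB per file, as in the AWX handlers
                'backupCount': 5,                  # keeps demo.log.1 .. demo.log.5
                'formatter': 'simple',
            },
        },
        'loggers': {
            'demo': {'handlers': ['rotating_file'], 'level': 'INFO'},
        },
    }

    logging.config.dictConfig(LOGGING)
    logging.getLogger('demo').info('rotates after 5 MB, keeping 5 backups')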
GoogleCloudPlatform/sap-deployment-automation
third_party/github.com/ansible/awx/awx/settings/defaults.py
Python
apache-2.0
42,385
[ "Galaxy" ]
4f75f1088e283a77b3f016bd5cf60431e8b686790ad9653b1f653c74fac31ad8
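The settings file in the record above opens by mirroring every upper-case name from django.conf.global_settings onto its own module before overriding them. A small self-contained sketch of that idiom, using a throwaway `defaults` namespace instead of Django's real global_settings so it can run on its own:

    # Illustration of the "copy defaults into this module" idiom:
    # only UPPER_CASE names are treated as settings and copied across.
    import sys
    import types

    defaults = types.SimpleNamespace(DEBUG=False, TIME_ZONE='UTC', _private='skipped')

    this_module = sys.modules[__name__]
    for setting in dir(defaults):
        if setting == setting.upper():
            setattr(this_module, setting, getattr(defaults, setting))

    print(DEBUG, TIME_ZONE)   # -> False UTC ; _private was not copied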
import logging
import os
import time

import matplotlib.pyplot as plt
import seaborn as sns

import mbuild as mb
import metamds as mds
import mdtraj as md

from simgen.project import Project

OFFLINE = True


def build_ethane_box(box, n_molecules, **kwargs):
    from mbuild.examples import Ethane
    ethane = Ethane()
    full_box = mb.fill_box(ethane, n_molecules, box)
    full_box.name = '{}_ethanes'.format(n_molecules)
    return full_box


def generate_code(**parameters):
    # import pdb; pdb.set_trace()
    if OFFLINE:
        # load simgen files from local folders
        res_dir = os.path.join(os.path.dirname(__file__), 'binary_lj_sim')
        manifest = {
            'title': 'Binary LJ Simulation Test Project with mBuild',
            'code_path': [os.path.join(res_dir, 'code')],
            'concept_path': [os.path.join(res_dir, 'concepts')],
            'template_path': [os.path.join(res_dir, 'templates')]
        }
        project = Project(manifest)
    else:
        # load simgen files from GitHub
        project = Project(os.path.join(os.path.dirname(__file__), 'binary_lj_sim', 'online_project.yaml'))
    return project.render_tasks('prg', output_dir='./', inject_dict=parameters)


if __name__ == '__main__':
    # # configure logging
    # logging.basicConfig(format='%(module)s:%(lineno)d %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p', level=0)

    # Build the initial configuration
    ethane_box = build_ethane_box(n_molecules=200, box=[3, 3, 3])
    # ethane_box.visualize()

    # Input parameters
    parameters = {'compound': ethane_box,
                  'forcefield': 'OPLS-aa',
                  'system_name': 'ethane_box'}

    # Initialize a simulation instance with a template and some metadata
    sim = mds.Simulation(name='ethane', template=generate_code,
                         input_dir='static_input_files', output_dir='simgen_output')

    # Parameterize our simulation template
    task = sim.parametrize(**parameters)
    # import pdb; pdb.set_trace()

    # Run
    task.execute()
    # task.execute(hostname='rahman.vuse.vanderbilt.edu', username='ctk3b')
    # print(task.status())
    # time.sleep(10)
    # task.sync()

    # Analyze
    trajectories = task.get_output_files('trajectories')
    topologies = task.get_output_files('topologies')
    # Pick which one to select?
    # import pdb; pdb.set_trace()
    trj_path = os.path.join(task.output_dir, 'nvt.xtc')
    top_path = os.path.join(task.output_dir, 'em.gro')

    traj = md.load(trj_path, top=top_path)
    print(traj)
    # import pdb; pdb.set_trace()

    # RDF
    # pairs = traj.top.select_pairs('name C', 'name C')
    # r, g_r = md.compute_rdf(traj, pairs)
    # plt.plot(r, g_r)
    # plt.xlabel('r (nm)')
    # plt.ylabel('g(r)')
    # plt.show()
    #
    # s2 = md.compute_nematic_order(traj, 'residues')
    # plt.plot(traj.time, s2)
    # plt.xlabel('time (ps)')
    # plt.ylabel('S2')
iModels/demos
demos/ethane_box/ethane_box_simgen.py
Python
mit
2,906
[ "MDTraj" ]
25c89c20f0909c9c27c418c4d65e660df7e9fa9dc3257ab24f2755f078f0a0a7
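The demo above stops at printing the loaded trajectory and leaves the RDF analysis commented out. One possible way to finish that step with the same MDTraj calls the comments reference, assuming the nvt.xtc and em.gro files were produced in simgen_output by the run; the r_range cutoff is an illustrative choice:

    import mdtraj as md
    import matplotlib.pyplot as plt

    # These paths follow the demo and only exist after the simulation task ran.
    traj = md.load('simgen_output/nvt.xtc', top='simgen_output/em.gro')

    # Carbon-carbon radial distribution function, mirroring the commented-out block.
    pairs = traj.top.select_pairs('name C', 'name C')
    r, g_r = md.compute_rdf(traj, pairs, r_range=(0.0, 1.2))

    plt.plot(r, g_r)
    plt.xlabel('r (nm)')
    plt.ylabel('g(r)')
    plt.show()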
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2011 Rackspace # Copyright (c) 2011 X.commerce, a business unit of eBay Inc. # Copyright 2013 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import fixtures import mox from oslo.config import cfg from nova import context from nova import db from nova.db.sqlalchemy import models from nova import exception from nova import ipv6 from nova.network import floating_ips from nova.network import linux_net from nova.network import manager as network_manager from nova.network import model as net_model from nova.openstack.common.db import exception as db_exc from nova.openstack.common import importutils from nova.openstack.common import log as logging from nova.openstack.common import processutils from nova.openstack.common import rpc from nova.openstack.common.rpc import common as rpc_common from nova import quota from nova import test from nova.tests import fake_ldap from nova.tests import fake_network from nova.tests import matchers from nova import utils CONF = cfg.CONF LOG = logging.getLogger(__name__) HOST = "testhost" FAKEUUID = "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa" networks = [{'id': 0, 'uuid': FAKEUUID, 'label': 'test0', 'injected': False, 'multi_host': False, 'cidr': '192.168.0.0/24', 'cidr_v6': '2001:db8::/64', 'gateway_v6': '2001:db8::1', 'netmask_v6': '64', 'netmask': '255.255.255.0', 'bridge': 'fa0', 'bridge_interface': 'fake_fa0', 'gateway': '192.168.0.1', 'broadcast': '192.168.0.255', 'dns1': '192.168.0.1', 'dns2': '192.168.0.2', 'vlan': None, 'host': HOST, 'project_id': 'fake_project', 'vpn_public_address': '192.168.0.2', 'vpn_public_port': '22', 'vpn_private_address': '10.0.0.2'}, {'id': 1, 'uuid': 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', 'label': 'test1', 'injected': False, 'multi_host': False, 'cidr': '192.168.1.0/24', 'cidr_v6': '2001:db9::/64', 'gateway_v6': '2001:db9::1', 'netmask_v6': '64', 'netmask': '255.255.255.0', 'bridge': 'fa1', 'bridge_interface': 'fake_fa1', 'gateway': '192.168.1.1', 'broadcast': '192.168.1.255', 'dns1': '192.168.0.1', 'dns2': '192.168.0.2', 'vlan': None, 'host': HOST, 'project_id': 'fake_project', 'vpn_public_address': '192.168.1.2', 'vpn_public_port': '22', 'vpn_private_address': '10.0.0.2'}] fixed_ips = [{'id': 0, 'network_id': FAKEUUID, 'address': '192.168.0.100', 'instance_uuid': 0, 'allocated': False, 'virtual_interface_id': 0, 'floating_ips': []}, {'id': 0, 'network_id': 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', 'address': '192.168.1.100', 'instance_uuid': 0, 'allocated': False, 'virtual_interface_id': 0, 'floating_ips': []}] flavor = {'id': 0, 'rxtx_cap': 3} floating_ip_fields = {'id': 0, 'address': '192.168.10.100', 'pool': 'nova', 'interface': 'eth0', 'fixed_ip_id': 0, 'project_id': None, 'auto_assigned': False} vifs = [{'id': 0, 'address': 'DE:AD:BE:EF:00:00', 'uuid': '00000000-0000-0000-0000-0000000000000000', 'network_id': 0, 'instance_uuid': 0}, {'id': 1, 'address': 'DE:AD:BE:EF:00:01', 'uuid': '00000000-0000-0000-0000-0000000000000001', 'network_id': 1, 
'instance_uuid': 0}, {'id': 2, 'address': 'DE:AD:BE:EF:00:02', 'uuid': '00000000-0000-0000-0000-0000000000000002', 'network_id': 2, 'instance_uuid': 0}] class FlatNetworkTestCase(test.TestCase): def setUp(self): super(FlatNetworkTestCase, self).setUp() self.tempdir = self.useFixture(fixtures.TempDir()).path self.flags(log_dir=self.tempdir) self.network = network_manager.FlatManager(host=HOST) self.network.instance_dns_domain = '' self.network.db = db self.context = context.RequestContext('testuser', 'testproject', is_admin=False) def test_get_instance_nw_info(self): fake_get_instance_nw_info = fake_network.fake_get_instance_nw_info nw_info = fake_get_instance_nw_info(self.stubs, 0, 2) self.assertFalse(nw_info) nw_info = fake_get_instance_nw_info(self.stubs, 1, 2) for i, (nw, info) in enumerate(nw_info): nid = i + 1 check = {'bridge': 'fake_br%d' % nid, 'cidr': '192.168.%s.0/24' % nid, 'cidr_v6': '2001:db8:0:%x::/64' % nid, 'id': '00000000-0000-0000-0000-00000000000000%02d' % nid, 'multi_host': False, 'injected': False, 'bridge_interface': None, 'vlan': None} self.assertThat(nw, matchers.DictMatches(check)) check = {'broadcast': '192.168.%d.255' % nid, 'dhcp_server': '192.168.1.1', 'dns': ['192.168.%d.3' % nid, '192.168.%d.4' % nid], 'gateway': '192.168.%d.1' % nid, 'gateway_v6': '2001:db8:0:1::1', 'ip6s': 'DONTCARE', 'ips': 'DONTCARE', 'label': 'test%d' % nid, 'mac': 'DE:AD:BE:EF:00:%02x' % nid, 'rxtx_cap': 30, 'vif_type': net_model.VIF_TYPE_BRIDGE, 'vif_devname': None, 'vif_uuid': '00000000-0000-0000-0000-00000000000000%02d' % nid, 'ovs_interfaceid': None, 'qbh_params': None, 'qbg_params': None, 'should_create_vlan': False, 'should_create_bridge': False} self.assertThat(info, matchers.DictMatches(check)) check = [{'enabled': 'DONTCARE', 'ip': '2001:db8:0:1::%x' % nid, 'netmask': 64, 'gateway': '2001:db8:0:1::1'}] self.assertThat(info['ip6s'], matchers.DictListMatches(check)) num_fixed_ips = len(info['ips']) check = [{'enabled': 'DONTCARE', 'ip': '192.168.%d.%03d' % (nid, ip_num + 99), 'netmask': '255.255.255.0', 'gateway': '192.168.%d.1' % nid} for ip_num in xrange(1, num_fixed_ips + 1)] self.assertThat(info['ips'], matchers.DictListMatches(check)) def test_validate_networks(self): self.mox.StubOutWithMock(db, 'network_get') self.mox.StubOutWithMock(db, 'network_get_all_by_uuids') self.mox.StubOutWithMock(db, 'fixed_ip_get_by_address') requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', '192.168.1.100'), ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', '192.168.0.100')] db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(), project_only=mox.IgnoreArg()).AndReturn(networks[:]) db.network_get(mox.IgnoreArg(), mox.IgnoreArg(), project_only=mox.IgnoreArg()).AndReturn(networks[1]) db.network_get(mox.IgnoreArg(), mox.IgnoreArg(), project_only=mox.IgnoreArg()).AndReturn(networks[0]) ip = fixed_ips[1].copy() ip['instance_uuid'] = None db.fixed_ip_get_by_address(mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(ip) ip = fixed_ips[0].copy() ip['instance_uuid'] = None db.fixed_ip_get_by_address(mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(ip) self.mox.ReplayAll() self.network.validate_networks(self.context, requested_networks) def test_validate_reserved(self): context_admin = context.RequestContext('testuser', 'testproject', is_admin=True) nets = self.network.create_networks(context_admin, 'fake', '192.168.0.0/24', False, 1, 256, None, None, None, None, None) self.assertEqual(1, len(nets)) network = nets[0] self.assertEqual(3, db.network_count_reserved_ips(context_admin, network['id'])) def 
test_validate_networks_none_requested_networks(self): self.network.validate_networks(self.context, None) def test_validate_networks_empty_requested_networks(self): requested_networks = [] self.mox.ReplayAll() self.network.validate_networks(self.context, requested_networks) def test_validate_networks_invalid_fixed_ip(self): self.mox.StubOutWithMock(db, 'network_get_all_by_uuids') requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', '192.168.1.100.1'), ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', '192.168.0.100.1')] db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(), project_only=mox.IgnoreArg()).AndReturn(networks[:]) self.mox.ReplayAll() self.assertRaises(exception.FixedIpInvalid, self.network.validate_networks, self.context, requested_networks) def test_validate_networks_empty_fixed_ip(self): self.mox.StubOutWithMock(db, 'network_get_all_by_uuids') requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', ''), ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', '')] db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(), project_only=mox.IgnoreArg()).AndReturn(networks[:]) self.mox.ReplayAll() self.assertRaises(exception.FixedIpInvalid, self.network.validate_networks, self.context, requested_networks) def test_validate_networks_none_fixed_ip(self): self.mox.StubOutWithMock(db, 'network_get_all_by_uuids') requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', None), ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', None)] db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(), project_only=mox.IgnoreArg()).AndReturn(networks[:]) self.mox.ReplayAll() self.network.validate_networks(self.context, requested_networks) def test_add_fixed_ip_instance_using_id_without_vpn(self): self.mox.StubOutWithMock(db, 'network_get') self.mox.StubOutWithMock(db, 'network_update') self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool') self.mox.StubOutWithMock(db, 'virtual_interface_get_by_instance_and_network') self.mox.StubOutWithMock(db, 'fixed_ip_update') self.mox.StubOutWithMock(quota.QUOTAS, 'reserve') self.mox.StubOutWithMock(db, 'instance_get_by_uuid') self.mox.StubOutWithMock(self.network, 'get_instance_nw_info') db.fixed_ip_associate_pool(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).AndReturn('192.168.0.101') db.instance_get_by_uuid(mox.IgnoreArg(), mox.IgnoreArg()).AndReturn({'security_groups': [{'id': 0, 'name': 'test'}]}) db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).AndReturn({'id': 0}) db.fixed_ip_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()) quota.QUOTAS.reserve(mox.IgnoreArg(), fixed_ips=mox.IgnoreArg()).AndReturn(None) db.instance_get_by_uuid(self.context, mox.IgnoreArg()).AndReturn({'display_name': HOST, 'uuid': FAKEUUID}) db.network_get(mox.IgnoreArg(), mox.IgnoreArg(), project_only=mox.IgnoreArg()).AndReturn(networks[0]) db.network_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()) self.network.get_instance_nw_info(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()) self.mox.ReplayAll() self.network.add_fixed_ip_to_instance(self.context, FAKEUUID, HOST, networks[0]['id']) def test_add_fixed_ip_instance_using_uuid_without_vpn(self): self.mox.StubOutWithMock(db, 'network_get_by_uuid') self.mox.StubOutWithMock(db, 'network_update') self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool') self.mox.StubOutWithMock(db, 'virtual_interface_get_by_instance_and_network') self.mox.StubOutWithMock(db, 'fixed_ip_update') self.mox.StubOutWithMock(quota.QUOTAS, 'reserve') 
self.mox.StubOutWithMock(db, 'instance_get_by_uuid') self.mox.StubOutWithMock(self.network, 'get_instance_nw_info') db.fixed_ip_associate_pool(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).AndReturn('192.168.0.101') db.instance_get_by_uuid(mox.IgnoreArg(), mox.IgnoreArg()).AndReturn({'security_groups': [{'id': 0, 'name': 'test'}]}) db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).AndReturn({'id': 0}) db.fixed_ip_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()) quota.QUOTAS.reserve(mox.IgnoreArg(), fixed_ips=mox.IgnoreArg()).AndReturn(None) db.instance_get_by_uuid(self.context, mox.IgnoreArg()).AndReturn({'display_name': HOST, 'uuid': FAKEUUID}) db.network_get_by_uuid(mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(networks[0]) db.network_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()) self.network.get_instance_nw_info(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()) self.mox.ReplayAll() self.network.add_fixed_ip_to_instance(self.context, FAKEUUID, HOST, networks[0]['uuid']) def test_mini_dns_driver(self): zone1 = "example.org" zone2 = "example.com" driver = self.network.instance_dns_manager driver.create_entry("hostone", "10.0.0.1", "A", zone1) driver.create_entry("hosttwo", "10.0.0.2", "A", zone1) driver.create_entry("hostthree", "10.0.0.3", "A", zone1) driver.create_entry("hostfour", "10.0.0.4", "A", zone1) driver.create_entry("hostfive", "10.0.0.5", "A", zone2) driver.delete_entry("hostone", zone1) driver.modify_address("hostfour", "10.0.0.1", zone1) driver.modify_address("hostthree", "10.0.0.1", zone1) names = driver.get_entries_by_address("10.0.0.1", zone1) self.assertEqual(len(names), 2) self.assertIn('hostthree', names) self.assertIn('hostfour', names) names = driver.get_entries_by_address("10.0.0.5", zone2) self.assertEqual(len(names), 1) self.assertIn('hostfive', names) addresses = driver.get_entries_by_name("hosttwo", zone1) self.assertEqual(len(addresses), 1) self.assertIn('10.0.0.2', addresses) self.assertRaises(exception.InvalidInput, driver.create_entry, "hostname", "10.10.10.10", "invalidtype", zone1) def test_mini_dns_driver_with_mixed_case(self): zone1 = "example.org" driver = self.network.instance_dns_manager driver.create_entry("HostTen", "10.0.0.10", "A", zone1) addresses = driver.get_entries_by_address("10.0.0.10", zone1) self.assertEqual(len(addresses), 1) for n in addresses: driver.delete_entry(n, zone1) addresses = driver.get_entries_by_address("10.0.0.10", zone1) self.assertEqual(len(addresses), 0) def test_instance_dns(self): fixedip = '192.168.0.101' self.mox.StubOutWithMock(db, 'network_get_by_uuid') self.mox.StubOutWithMock(db, 'network_update') self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool') self.mox.StubOutWithMock(db, 'virtual_interface_get_by_instance_and_network') self.mox.StubOutWithMock(db, 'fixed_ip_update') self.mox.StubOutWithMock(db, 'instance_get_by_uuid') self.mox.StubOutWithMock(self.network, 'get_instance_nw_info') self.mox.StubOutWithMock(quota.QUOTAS, 'reserve') db.fixed_ip_associate_pool(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(fixedip) db.instance_get_by_uuid(mox.IgnoreArg(), mox.IgnoreArg()).AndReturn({'security_groups': [{'id': 0, 'name': 'test'}]}) db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).AndReturn({'id': 0}) db.fixed_ip_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()) quota.QUOTAS.reserve(mox.IgnoreArg(), fixed_ips=mox.IgnoreArg()).AndReturn(None) 
db.instance_get_by_uuid(self.context, mox.IgnoreArg()).AndReturn({'display_name': HOST, 'uuid': FAKEUUID}) db.network_get_by_uuid(mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(networks[0]) db.network_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()) self.network.get_instance_nw_info(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()) self.mox.ReplayAll() self.network.add_fixed_ip_to_instance(self.context, FAKEUUID, HOST, networks[0]['uuid']) instance_manager = self.network.instance_dns_manager addresses = instance_manager.get_entries_by_name(HOST, self.network.instance_dns_domain) self.assertEqual(len(addresses), 1) self.assertEqual(addresses[0], fixedip) addresses = instance_manager.get_entries_by_name(FAKEUUID, self.network.instance_dns_domain) self.assertEqual(len(addresses), 1) self.assertEqual(addresses[0], fixedip) def test_allocate_floating_ip(self): self.assertEqual(self.network.allocate_floating_ip(self.context, 1, None), None) def test_deallocate_floating_ip(self): self.assertEqual(self.network.deallocate_floating_ip(self.context, 1, None), None) def test_associate_floating_ip(self): self.assertEqual(self.network.associate_floating_ip(self.context, None, None), None) def test_disassociate_floating_ip(self): self.assertEqual(self.network.disassociate_floating_ip(self.context, None, None), None) def test_get_networks_by_uuids_ordering(self): self.mox.StubOutWithMock(db, 'network_get_all_by_uuids') requested_networks = ['bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'] db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(), project_only=mox.IgnoreArg()).AndReturn(networks[:]) self.mox.ReplayAll() res = self.network._get_networks_by_uuids(self.context, requested_networks) self.assertEqual(res[0]['id'], 1) self.assertEqual(res[1]['id'], 0) class VlanNetworkTestCase(test.TestCase): def setUp(self): super(VlanNetworkTestCase, self).setUp() self.useFixture(test.SampleNetworks()) self.network = network_manager.VlanManager(host=HOST) self.network.db = db self.context = context.RequestContext('testuser', 'testproject', is_admin=False) def test_vpn_allocate_fixed_ip(self): self.mox.StubOutWithMock(db, 'fixed_ip_associate') self.mox.StubOutWithMock(db, 'fixed_ip_update') self.mox.StubOutWithMock(db, 'virtual_interface_get_by_instance_and_network') self.mox.StubOutWithMock(db, 'instance_get_by_uuid') db.fixed_ip_associate(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg(), reserved=True).AndReturn('192.168.0.1') db.fixed_ip_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()) db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).AndReturn({'id': 0}) db.instance_get_by_uuid(mox.IgnoreArg(), mox.IgnoreArg()).AndReturn({'display_name': HOST, 'uuid': FAKEUUID}) self.mox.ReplayAll() network = dict(networks[0]) network['vpn_private_address'] = '192.168.0.2' self.network.allocate_fixed_ip(self.context, FAKEUUID, network, vpn=True) def test_vpn_allocate_fixed_ip_no_network_id(self): network = dict(networks[0]) network['vpn_private_address'] = '192.168.0.2' network['id'] = None instance = db.instance_create(self.context, {}) context_admin = context.RequestContext('testuser', 'testproject', is_admin=True) self.assertRaises(exception.FixedIpNotFoundForNetwork, self.network.allocate_fixed_ip, context_admin, instance['uuid'], network, vpn=True) def test_allocate_fixed_ip(self): self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool') self.mox.StubOutWithMock(db, 'fixed_ip_update') 
self.mox.StubOutWithMock(db, 'virtual_interface_get_by_instance_and_network') self.mox.StubOutWithMock(db, 'instance_get_by_uuid') db.instance_get_by_uuid(mox.IgnoreArg(), mox.IgnoreArg()).AndReturn({'security_groups': [{'id': 0}]}) db.fixed_ip_associate_pool(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).AndReturn('192.168.0.1') db.fixed_ip_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()) db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).AndReturn({'id': 0}) db.instance_get_by_uuid(mox.IgnoreArg(), mox.IgnoreArg()).AndReturn({'display_name': HOST, 'uuid': FAKEUUID}) self.mox.ReplayAll() network = dict(networks[0]) network['vpn_private_address'] = '192.168.0.2' self.network.allocate_fixed_ip(self.context, FAKEUUID, network) def test_create_networks_too_big(self): self.assertRaises(ValueError, self.network.create_networks, None, num_networks=4094, vlan_start=1) def test_create_networks_too_many(self): self.assertRaises(ValueError, self.network.create_networks, None, num_networks=100, vlan_start=1, cidr='192.168.0.1/24', network_size=100) def test_validate_networks(self): def network_get(_context, network_id, project_only='allow_none'): return networks[network_id] self.stubs.Set(db, 'network_get', network_get) self.mox.StubOutWithMock(db, 'network_get_all_by_uuids') self.mox.StubOutWithMock(db, "fixed_ip_get_by_address") requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', '192.168.1.100'), ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', '192.168.0.100')] db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(), project_only=mox.IgnoreArg()).AndReturn(networks[:]) fixed_ips[1]['network_id'] = networks[1]['id'] fixed_ips[1]['instance_uuid'] = None db.fixed_ip_get_by_address(mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(fixed_ips[1]) fixed_ips[0]['network_id'] = networks[0]['id'] fixed_ips[0]['instance_uuid'] = None db.fixed_ip_get_by_address(mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(fixed_ips[0]) self.mox.ReplayAll() self.network.validate_networks(self.context, requested_networks) def test_validate_networks_none_requested_networks(self): self.network.validate_networks(self.context, None) def test_validate_networks_empty_requested_networks(self): requested_networks = [] self.mox.ReplayAll() self.network.validate_networks(self.context, requested_networks) def test_validate_networks_invalid_fixed_ip(self): self.mox.StubOutWithMock(db, 'network_get_all_by_uuids') requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', '192.168.1.100.1'), ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', '192.168.0.100.1')] db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(), project_only=mox.IgnoreArg()).AndReturn(networks[:]) self.mox.ReplayAll() self.assertRaises(exception.FixedIpInvalid, self.network.validate_networks, self.context, requested_networks) def test_validate_networks_empty_fixed_ip(self): self.mox.StubOutWithMock(db, 'network_get_all_by_uuids') requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', ''), ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', '')] db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(), project_only=mox.IgnoreArg()).AndReturn(networks[:]) self.mox.ReplayAll() self.assertRaises(exception.FixedIpInvalid, self.network.validate_networks, self.context, requested_networks) def test_validate_networks_none_fixed_ip(self): self.mox.StubOutWithMock(db, 'network_get_all_by_uuids') requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', None), ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', None)] 
db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(), project_only=mox.IgnoreArg()).AndReturn(networks[:]) self.mox.ReplayAll() self.network.validate_networks(self.context, requested_networks) def test_floating_ip_owned_by_project(self): ctxt = context.RequestContext('testuser', 'testproject', is_admin=False) # raises because floating_ip project_id is None floating_ip = {'address': '10.0.0.1', 'project_id': None} self.assertRaises(exception.NotAuthorized, self.network._floating_ip_owned_by_project, ctxt, floating_ip) # raises because floating_ip project_id is not equal to ctxt project_id floating_ip = {'address': '10.0.0.1', 'project_id': ctxt.project_id + '1'} self.assertRaises(exception.NotAuthorized, self.network._floating_ip_owned_by_project, ctxt, floating_ip) # does not raise (floating ip is owned by ctxt project) floating_ip = {'address': '10.0.0.1', 'project_id': ctxt.project_id} self.network._floating_ip_owned_by_project(ctxt, floating_ip) ctxt = context.RequestContext(None, None, is_admin=True) # does not raise (ctxt is admin) floating_ip = {'address': '10.0.0.1', 'project_id': None} self.network._floating_ip_owned_by_project(ctxt, floating_ip) # does not raise (ctxt is admin) floating_ip = {'address': '10.0.0.1', 'project_id': 'testproject'} self.network._floating_ip_owned_by_project(ctxt, floating_ip) def test_allocate_floating_ip(self): ctxt = context.RequestContext('testuser', 'testproject', is_admin=False) def fake_allocate_address(*args, **kwargs): return {'address': '10.0.0.1', 'project_id': ctxt.project_id} self.stubs.Set(self.network.db, 'floating_ip_allocate_address', fake_allocate_address) self.network.allocate_floating_ip(ctxt, ctxt.project_id) def test_deallocate_floating_ip(self): ctxt = context.RequestContext('testuser', 'testproject', is_admin=False) def fake1(*args, **kwargs): pass def fake2(*args, **kwargs): return {'address': '10.0.0.1', 'fixed_ip_id': 1} def fake3(*args, **kwargs): return {'address': '10.0.0.1', 'fixed_ip_id': None, 'project_id': ctxt.project_id} self.stubs.Set(self.network.db, 'floating_ip_deallocate', fake1) self.stubs.Set(self.network, '_floating_ip_owned_by_project', fake1) # this time should raise because floating ip is associated to fixed_ip self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake2) self.assertRaises(exception.FloatingIpAssociated, self.network.deallocate_floating_ip, ctxt, mox.IgnoreArg()) # this time should not raise self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake3) self.network.deallocate_floating_ip(ctxt, ctxt.project_id) def test_associate_floating_ip(self): ctxt = context.RequestContext('testuser', 'testproject', is_admin=False) def fake1(*args, **kwargs): return {'address': '10.0.0.1', 'network': 'fakenet'} # floating ip that's already associated def fake2(*args, **kwargs): return {'address': '10.0.0.1', 'pool': 'nova', 'interface': 'eth0', 'fixed_ip_id': 1} # floating ip that isn't associated def fake3(*args, **kwargs): return {'address': '10.0.0.1', 'pool': 'nova', 'interface': 'eth0', 'fixed_ip_id': None} # fixed ip with remote host def fake4(*args, **kwargs): return {'address': '10.0.0.1', 'pool': 'nova', 'instance_uuid': FAKEUUID, 'interface': 'eth0', 'network_id': 'blah'} def fake4_network(*args, **kwargs): return {'multi_host': False, 'host': 'jibberjabber'} # fixed ip with local host def fake5(*args, **kwargs): return {'address': '10.0.0.1', 'pool': 'nova', 'instance_uuid': FAKEUUID, 'interface': 'eth0', 'network_id': 'blahblah'} def fake5_network(*args, **kwargs): 
return {'multi_host': False, 'host': 'testhost'} def fake6(*args, **kwargs): self.local = False def fake7(*args, **kwargs): self.local = True def fake8(*args, **kwargs): raise processutils.ProcessExecutionError('', 'Cannot find device "em0"\n') def fake9(*args, **kwargs): raise test.TestingException() # raises because interface doesn't exist self.stubs.Set(self.network.db, 'floating_ip_fixed_ip_associate', fake1) self.stubs.Set(self.network.db, 'floating_ip_disassociate', fake1) self.stubs.Set(self.network.driver, 'ensure_floating_forward', fake8) self.assertRaises(exception.NoFloatingIpInterface, self.network._associate_floating_ip, ctxt, mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()) self.stubs.Set(self.network, '_floating_ip_owned_by_project', fake1) # raises because floating_ip is already associated to a fixed_ip self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake2) self.stubs.Set(self.network, 'disassociate_floating_ip', fake9) def fake_fixed_ip_get(context, fixed_ip_id): return {'address': 'old', 'instance_uuid': 'fake_uuid'} self.stubs.Set(self.network.db, 'fixed_ip_get', fake_fixed_ip_get) # doesn't raise because we exit early if the address is the same self.network.associate_floating_ip(ctxt, mox.IgnoreArg(), 'old') # raises because we call disassociate which is mocked self.assertRaises(test.TestingException, self.network.associate_floating_ip, ctxt, mox.IgnoreArg(), 'new') self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake3) # does not raise and makes call remotely self.local = True self.stubs.Set(self.network.db, 'fixed_ip_get_by_address', fake4) self.stubs.Set(self.network.db, 'network_get', fake4_network) self.stubs.Set(rpc, 'call', fake6) self.network.associate_floating_ip(ctxt, mox.IgnoreArg(), mox.IgnoreArg()) self.assertFalse(self.local) # does not raise and makes call locally self.local = False self.stubs.Set(self.network.db, 'fixed_ip_get_by_address', fake5) self.stubs.Set(self.network.db, 'network_get', fake5_network) self.stubs.Set(self.network, '_associate_floating_ip', fake7) self.network.associate_floating_ip(ctxt, mox.IgnoreArg(), mox.IgnoreArg()) self.assertTrue(self.local) def test_add_floating_ip_nat_before_bind(self): # Tried to verify order with documented mox record/verify # functionality, but it doesn't seem to work since I can't make it # fail. I'm using stubs and a flag for now, but if this mox feature # can be made to work, it would be a better way to test this. 
# # self.mox.StubOutWithMock(self.network.driver, # 'ensure_floating_forward') # self.mox.StubOutWithMock(self.network.driver, 'bind_floating_ip') # # self.network.driver.ensure_floating_forward(mox.IgnoreArg(), # mox.IgnoreArg(), # mox.IgnoreArg(), # mox.IgnoreArg()) # self.network.driver.bind_floating_ip(mox.IgnoreArg(), # mox.IgnoreArg()) # self.mox.ReplayAll() nat_called = [False] def fake_nat(*args, **kwargs): nat_called[0] = True def fake_bind(*args, **kwargs): self.assertTrue(nat_called[0]) self.stubs.Set(self.network.driver, 'ensure_floating_forward', fake_nat) self.stubs.Set(self.network.driver, 'bind_floating_ip', fake_bind) self.network.l3driver.add_floating_ip('fakefloat', 'fakefixed', 'fakeiface', 'fakenet') def test_floating_ip_init_host(self): def get_all_by_host(_context, _host): return [{'interface': 'foo', 'address': 'foo'}, {'interface': 'fakeiface', 'address': 'fakefloat', 'fixed_ip_id': 1}, {'interface': 'bar', 'address': 'bar', 'fixed_ip_id': 2}] self.stubs.Set(self.network.db, 'floating_ip_get_all_by_host', get_all_by_host) def fixed_ip_get(_context, fixed_ip_id, get_network): if fixed_ip_id == 1: return {'address': 'fakefixed', 'network': 'fakenet'} raise exception.FixedIpNotFound(id=fixed_ip_id) self.stubs.Set(self.network.db, 'fixed_ip_get', fixed_ip_get) self.mox.StubOutWithMock(self.network.l3driver, 'add_floating_ip') self.flags(public_interface=False) self.network.l3driver.add_floating_ip('fakefloat', 'fakefixed', 'fakeiface', 'fakenet') self.mox.ReplayAll() self.network.init_host_floating_ips() self.mox.UnsetStubs() self.mox.VerifyAll() self.mox.StubOutWithMock(self.network.l3driver, 'add_floating_ip') self.flags(public_interface='fooiface') self.network.l3driver.add_floating_ip('fakefloat', 'fakefixed', 'fooiface', 'fakenet') self.mox.ReplayAll() self.network.init_host_floating_ips() self.mox.UnsetStubs() self.mox.VerifyAll() def test_disassociate_floating_ip(self): ctxt = context.RequestContext('testuser', 'testproject', is_admin=False) def fake1(*args, **kwargs): pass # floating ip that isn't associated def fake2(*args, **kwargs): return {'address': '10.0.0.1', 'pool': 'nova', 'interface': 'eth0', 'fixed_ip_id': None} # floating ip that is associated def fake3(*args, **kwargs): return {'address': '10.0.0.1', 'pool': 'nova', 'interface': 'eth0', 'fixed_ip_id': 1, 'project_id': ctxt.project_id} # fixed ip with remote host def fake4(*args, **kwargs): return {'address': '10.0.0.1', 'pool': 'nova', 'instance_uuid': FAKEUUID, 'interface': 'eth0', 'network_id': 'blah'} def fake4_network(*args, **kwargs): return {'multi_host': False, 'host': 'jibberjabber'} # fixed ip with local host def fake5(*args, **kwargs): return {'address': '10.0.0.1', 'pool': 'nova', 'instance_uuid': FAKEUUID, 'interface': 'eth0', 'network_id': 'blahblah'} def fake5_network(*args, **kwargs): return {'multi_host': False, 'host': 'testhost'} def fake6(*args, **kwargs): self.local = False def fake7(*args, **kwargs): self.local = True def fake8(*args, **kwargs): return {'address': '10.0.0.1', 'pool': 'nova', 'interface': 'eth0', 'fixed_ip_id': 1, 'auto_assigned': True, 'project_id': ctxt.project_id} self.stubs.Set(self.network, '_floating_ip_owned_by_project', fake1) # raises because floating_ip is not associated to a fixed_ip self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake2) self.assertRaises(exception.FloatingIpNotAssociated, self.network.disassociate_floating_ip, ctxt, mox.IgnoreArg()) self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake3) # does not raise 
and makes call remotely self.local = True self.stubs.Set(self.network.db, 'fixed_ip_get', fake4) self.stubs.Set(self.network.db, 'network_get', fake4_network) self.stubs.Set(rpc, 'call', fake6) self.network.disassociate_floating_ip(ctxt, mox.IgnoreArg()) self.assertFalse(self.local) # does not raise and makes call locally self.local = False self.stubs.Set(self.network.db, 'fixed_ip_get', fake5) self.stubs.Set(self.network.db, 'network_get', fake5_network) self.stubs.Set(self.network, '_disassociate_floating_ip', fake7) self.network.disassociate_floating_ip(ctxt, mox.IgnoreArg()) self.assertTrue(self.local) # raises because auto_assigned floating IP cannot be disassociated self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake8) self.assertRaises(exception.CannotDisassociateAutoAssignedFloatingIP, self.network.disassociate_floating_ip, ctxt, mox.IgnoreArg()) def test_add_fixed_ip_instance_without_vpn_requested_networks(self): self.mox.StubOutWithMock(db, 'network_get') self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool') self.mox.StubOutWithMock(db, 'virtual_interface_get_by_instance_and_network') self.mox.StubOutWithMock(db, 'fixed_ip_update') self.mox.StubOutWithMock(db, 'instance_get_by_uuid') self.mox.StubOutWithMock(self.network, 'get_instance_nw_info') db.fixed_ip_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()) db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).AndReturn({'id': 0}) db.instance_get_by_uuid(mox.IgnoreArg(), mox.IgnoreArg()).AndReturn({'security_groups': [{'id': 0}], 'availability_zone': '', 'uuid': FAKEUUID}) db.fixed_ip_associate_pool(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).AndReturn('192.168.0.101') db.network_get(mox.IgnoreArg(), mox.IgnoreArg(), project_only=mox.IgnoreArg()).AndReturn(networks[0]) db.instance_get_by_uuid(mox.IgnoreArg(), mox.IgnoreArg()).AndReturn({'display_name': HOST, 'uuid': FAKEUUID}) self.network.get_instance_nw_info(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()) self.mox.ReplayAll() self.network.add_fixed_ip_to_instance(self.context, FAKEUUID, HOST, networks[0]['id']) def test_ip_association_and_allocation_of_other_project(self): """Makes sure that we cannot deallocate or disassociate a public IP of another project.
""" def network_get(_context, network_id, project_only="allow_none"): return networks[network_id] self.stubs.Set(db, 'network_get', network_get) context1 = context.RequestContext('user', 'project1') context2 = context.RequestContext('user', 'project2') float_ip = db.floating_ip_create(context1.elevated(), {'address': '1.2.3.4', 'project_id': context1.project_id}) float_addr = float_ip['address'] instance = db.instance_create(context1, {'project_id': 'project1'}) fix_addr = db.fixed_ip_associate_pool(context1.elevated(), 1, instance['uuid']) # Associate the IP with non-admin user context self.assertRaises(exception.NotAuthorized, self.network.associate_floating_ip, context2, float_addr, fix_addr) # Deallocate address from other project self.assertRaises(exception.NotAuthorized, self.network.deallocate_floating_ip, context2, float_addr) # Now Associates the address to the actual project self.network.associate_floating_ip(context1, float_addr, fix_addr) # Now try dis-associating from other project self.assertRaises(exception.NotAuthorized, self.network.disassociate_floating_ip, context2, float_addr) # Clean up the ip addresses self.network.disassociate_floating_ip(context1, float_addr) self.network.deallocate_floating_ip(context1, float_addr) self.network.deallocate_fixed_ip(context1, fix_addr, 'fake') db.floating_ip_destroy(context1.elevated(), float_addr) db.fixed_ip_disassociate(context1.elevated(), fix_addr) def test_deallocate_fixed(self): """Verify that release is called properly. Ensures https://bugs.launchpad.net/nova/+bug/973442 doesn't return """ def network_get(_context, network_id, project_only="allow_none"): return networks[network_id] self.stubs.Set(db, 'network_get', network_get) def vif_get(_context, _vif_id): return {'address': 'fake_mac'} self.stubs.Set(db, 'virtual_interface_get', vif_get) context1 = context.RequestContext('user', 'project1') instance = db.instance_create(context1, {'project_id': 'project1'}) elevated = context1.elevated() fix_addr = db.fixed_ip_associate_pool(elevated, 1, instance['uuid']) values = {'allocated': True, 'virtual_interface_id': 3} db.fixed_ip_update(elevated, fix_addr, values) fixed = db.fixed_ip_get_by_address(elevated, fix_addr) network = db.network_get(elevated, fixed['network_id']) self.flags(force_dhcp_release=True) self.mox.StubOutWithMock(linux_net, 'release_dhcp') linux_net.release_dhcp(network['bridge'], fixed['address'], 'fake_mac') self.mox.ReplayAll() self.network.deallocate_fixed_ip(context1, fix_addr, 'fake') fixed = db.fixed_ip_get_by_address(elevated, fix_addr) self.assertFalse(fixed['allocated']) def test_deallocate_fixed_deleted(self): # Verify doesn't deallocate deleted fixed_ip from deleted network. 
def network_get(_context, network_id, project_only="allow_none"): return networks[network_id] def teardown_network_on_host(_context, network): if network['id'] == 0: raise test.TestingException() self.stubs.Set(db, 'network_get', network_get) self.stubs.Set(self.network, '_teardown_network_on_host', teardown_network_on_host) context1 = context.RequestContext('user', 'project1') instance = db.instance_create(context1, {'project_id': 'project1'}) elevated = context1.elevated() fix_addr = db.fixed_ip_associate_pool(elevated, 1, instance['uuid']) db.fixed_ip_update(elevated, fix_addr, {'deleted': 1}) elevated.read_deleted = 'yes' delfixed = db.fixed_ip_get_by_address(elevated, fix_addr) values = {'address': fix_addr, 'network_id': 0, 'instance_uuid': delfixed['instance_uuid']} db.fixed_ip_create(elevated, values) elevated.read_deleted = 'no' elevated.read_deleted = 'yes' deallocate = self.network.deallocate_fixed_ip self.assertRaises(test.TestingException, deallocate, context1, fix_addr, 'fake') def test_deallocate_fixed_no_vif(self): """Verify that deallocate doesn't raise when no vif is returned. Ensures https://bugs.launchpad.net/nova/+bug/968457 doesn't return """ def network_get(_context, network_id, project_only="allow_none"): return networks[network_id] self.stubs.Set(db, 'network_get', network_get) def vif_get(_context, _vif_id): return None self.stubs.Set(db, 'virtual_interface_get', vif_get) context1 = context.RequestContext('user', 'project1') instance = db.instance_create(context1, {'project_id': 'project1'}) elevated = context1.elevated() fix_addr = db.fixed_ip_associate_pool(elevated, 1, instance['uuid']) values = {'allocated': True, 'virtual_interface_id': 3} db.fixed_ip_update(elevated, fix_addr, values) self.flags(force_dhcp_release=True) self.network.deallocate_fixed_ip(context1, fix_addr, 'fake') def test_fixed_ip_cleanup_fail(self): # Verify IP is not deallocated if the security group refresh fails. 
def network_get(_context, network_id, project_only="allow_none"): return networks[network_id] self.stubs.Set(db, 'network_get', network_get) context1 = context.RequestContext('user', 'project1') instance = db.instance_create(context1, {'project_id': 'project1'}) elevated = context1.elevated() fix_addr = db.fixed_ip_associate_pool(elevated, 1, instance['uuid']) values = {'allocated': True, 'virtual_interface_id': 3} db.fixed_ip_update(elevated, fix_addr, values) def fake_refresh(instance_uuid): raise test.TestingException() self.stubs.Set(self.network, '_do_trigger_security_group_members_refresh_for_instance', fake_refresh) self.assertRaises(test.TestingException, self.network.deallocate_fixed_ip, context1, fix_addr, 'fake') fixed = db.fixed_ip_get_by_address(elevated, fix_addr) self.assertTrue(fixed['allocated']) def test_get_networks_by_uuids_ordering(self): self.mox.StubOutWithMock(db, 'network_get_all_by_uuids') requested_networks = ['bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'] db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(), project_only=mox.IgnoreArg()).AndReturn(networks[:]) self.mox.ReplayAll() res = self.network._get_networks_by_uuids(self.context, requested_networks) self.assertEqual(res[0]['id'], 1) self.assertEqual(res[1]['id'], 0) class _TestDomainObject(object): def __init__(self, **kwargs): for k, v in kwargs.iteritems(): self.__setattr__(k, v) class CommonNetworkTestCase(test.TestCase): def setUp(self): super(CommonNetworkTestCase, self).setUp() self.context = context.RequestContext('fake', 'fake') self.flags(ipv6_backend='rfc2462') ipv6.reset_backend() def test_validate_instance_zone_for_dns_domain(self): domain = 'example.com' az = 'test_az' domains = { domain: _TestDomainObject( domain=domain, availability_zone=az)} def dnsdomain_get(context, instance_domain): return domains.get(instance_domain) self.stubs.Set(db, 'dnsdomain_get', dnsdomain_get) fake_instance = {'uuid': FAKEUUID, 'availability_zone': az} manager = network_manager.NetworkManager() res = manager._validate_instance_zone_for_dns_domain(self.context, fake_instance) self.assertTrue(res) def fake_create_fixed_ips(self, context, network_id, fixed_cidr=None): return None def test_get_instance_nw_info_client_exceptions(self): manager = network_manager.NetworkManager() self.mox.StubOutWithMock(manager.db, 'virtual_interface_get_by_instance') manager.db.virtual_interface_get_by_instance( self.context, FAKEUUID).AndRaise(exception.InstanceNotFound( instance_id=FAKEUUID)) self.mox.ReplayAll() self.assertRaises(rpc_common.ClientException, manager.get_instance_nw_info, self.context, FAKEUUID, 'fake_rxtx_factor', HOST) def test_deallocate_for_instance_passes_host_info(self): manager = fake_network.FakeNetworkManager() db = manager.db db.instance_get = lambda _x, _y: dict(uuid='ignoreduuid') db.virtual_interface_delete_by_instance = lambda _x, _y: None ctx = context.RequestContext('igonre', 'igonre') db.fixed_ip_get_by_instance = lambda x, y: [dict(address='1.2.3.4', network_id='ignoredid')] manager.deallocate_for_instance( ctx, instance_id='ignore', host='somehost') self.assertEquals([ (ctx, '1.2.3.4', 'somehost') ], manager.deallocate_fixed_ip_calls) def test_remove_fixed_ip_from_instance(self): manager = fake_network.FakeNetworkManager() manager.remove_fixed_ip_from_instance(self.context, 99, HOST, '10.0.0.1') self.assertEquals(manager.deallocate_called, '10.0.0.1') def test_remove_fixed_ip_from_instance_bad_input(self): manager = fake_network.FakeNetworkManager() 
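# NOTE(editor): _TestDomainObject above turns keyword arguments into instance
# attributes so a plain object can stand in for a DB row that is read with dot
# notation. Below is a minimal standalone sketch of the same pattern; `FakeRow`
# is a hypothetical name used only for illustration.
class FakeRow(object):
    def __init__(self, **kwargs):
        # Copy every keyword argument onto the instance as an attribute.
        for key, value in kwargs.items():
            setattr(self, key, value)


row = FakeRow(domain='example.com', availability_zone='test_az')
assert row.domain == 'example.com'
assert row.availability_zone == 'test_az'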
self.assertRaises(exception.FixedIpNotFoundForSpecificInstance, manager.remove_fixed_ip_from_instance, self.context, 99, HOST, 'bad input') def test_validate_cidrs(self): manager = fake_network.FakeNetworkManager() nets = manager.create_networks(None, 'fake', '192.168.0.0/24', False, 1, 256, None, None, None, None, None) self.assertEqual(1, len(nets)) cidrs = [str(net['cidr']) for net in nets] self.assertTrue('192.168.0.0/24' in cidrs) def test_validate_cidrs_split_exact_in_half(self): manager = fake_network.FakeNetworkManager() nets = manager.create_networks(None, 'fake', '192.168.0.0/24', False, 2, 128, None, None, None, None, None) self.assertEqual(2, len(nets)) cidrs = [str(net['cidr']) for net in nets] self.assertTrue('192.168.0.0/25' in cidrs) self.assertTrue('192.168.0.128/25' in cidrs) def test_validate_cidrs_split_cidr_in_use_middle_of_range(self): manager = fake_network.FakeNetworkManager() self.mox.StubOutWithMock(manager.db, 'network_get_all') ctxt = mox.IgnoreArg() manager.db.network_get_all(ctxt).AndReturn([{'id': 1, 'cidr': '192.168.2.0/24'}]) self.mox.ReplayAll() nets = manager.create_networks(None, 'fake', '192.168.0.0/16', False, 4, 256, None, None, None, None, None) self.assertEqual(4, len(nets)) cidrs = [str(net['cidr']) for net in nets] exp_cidrs = ['192.168.0.0/24', '192.168.1.0/24', '192.168.3.0/24', '192.168.4.0/24'] for exp_cidr in exp_cidrs: self.assertTrue(exp_cidr in cidrs) self.assertFalse('192.168.2.0/24' in cidrs) def test_validate_cidrs_smaller_subnet_in_use(self): manager = fake_network.FakeNetworkManager() self.mox.StubOutWithMock(manager.db, 'network_get_all') ctxt = mox.IgnoreArg() manager.db.network_get_all(ctxt).AndReturn([{'id': 1, 'cidr': '192.168.2.9/25'}]) self.mox.ReplayAll() # CidrConflict: requested cidr (192.168.2.0/24) conflicts with # existing smaller cidr args = (None, 'fake', '192.168.2.0/24', False, 1, 256, None, None, None, None, None) self.assertRaises(exception.CidrConflict, manager.create_networks, *args) def test_validate_cidrs_split_smaller_cidr_in_use(self): manager = fake_network.FakeNetworkManager() self.mox.StubOutWithMock(manager.db, 'network_get_all') ctxt = mox.IgnoreArg() manager.db.network_get_all(ctxt).AndReturn([{'id': 1, 'cidr': '192.168.2.0/25'}]) self.mox.ReplayAll() nets = manager.create_networks(None, 'fake', '192.168.0.0/16', False, 4, 256, None, None, None, None, None) self.assertEqual(4, len(nets)) cidrs = [str(net['cidr']) for net in nets] exp_cidrs = ['192.168.0.0/24', '192.168.1.0/24', '192.168.3.0/24', '192.168.4.0/24'] for exp_cidr in exp_cidrs: self.assertTrue(exp_cidr in cidrs) self.assertFalse('192.168.2.0/24' in cidrs) def test_validate_cidrs_split_smaller_cidr_in_use2(self): manager = fake_network.FakeNetworkManager() self.mox.StubOutWithMock(manager.db, 'network_get_all') ctxt = mox.IgnoreArg() manager.db.network_get_all(ctxt).AndReturn([{'id': 1, 'cidr': '192.168.2.9/29'}]) self.mox.ReplayAll() nets = manager.create_networks(None, 'fake', '192.168.2.0/24', False, 3, 32, None, None, None, None, None) self.assertEqual(3, len(nets)) cidrs = [str(net['cidr']) for net in nets] exp_cidrs = ['192.168.2.32/27', '192.168.2.64/27', '192.168.2.96/27'] for exp_cidr in exp_cidrs: self.assertTrue(exp_cidr in cidrs) self.assertFalse('192.168.2.0/27' in cidrs) def test_validate_cidrs_split_all_in_use(self): manager = fake_network.FakeNetworkManager() self.mox.StubOutWithMock(manager.db, 'network_get_all') ctxt = mox.IgnoreArg() in_use = [{'id': 1, 'cidr': '192.168.2.9/29'}, {'id': 2, 'cidr': '192.168.2.64/26'}, {'id': 
3, 'cidr': '192.168.2.128/26'}] manager.db.network_get_all(ctxt).AndReturn(in_use) self.mox.ReplayAll() args = (None, 'fake', '192.168.2.0/24', False, 3, 64, None, None, None, None, None) # CidrConflict: Not enough subnets avail to satisfy requested num_ # networks - some subnets in requested range already # in use self.assertRaises(exception.CidrConflict, manager.create_networks, *args) def test_validate_cidrs_one_in_use(self): manager = fake_network.FakeNetworkManager() args = (None, 'fake', '192.168.0.0/24', False, 2, 256, None, None, None, None, None) # ValueError: network_size * num_networks exceeds cidr size self.assertRaises(ValueError, manager.create_networks, *args) def test_validate_cidrs_already_used(self): manager = fake_network.FakeNetworkManager() self.mox.StubOutWithMock(manager.db, 'network_get_all') ctxt = mox.IgnoreArg() manager.db.network_get_all(ctxt).AndReturn([{'id': 1, 'cidr': '192.168.0.0/24'}]) self.mox.ReplayAll() # CidrConflict: cidr already in use args = (None, 'fake', '192.168.0.0/24', False, 1, 256, None, None, None, None, None) self.assertRaises(exception.CidrConflict, manager.create_networks, *args) def test_validate_cidrs_too_many(self): manager = fake_network.FakeNetworkManager() args = (None, 'fake', '192.168.0.0/24', False, 200, 256, None, None, None, None, None) # ValueError: Not enough subnets avail to satisfy requested # num_networks self.assertRaises(ValueError, manager.create_networks, *args) def test_validate_cidrs_split_partial(self): manager = fake_network.FakeNetworkManager() nets = manager.create_networks(None, 'fake', '192.168.0.0/16', False, 2, 256, None, None, None, None, None) returned_cidrs = [str(net['cidr']) for net in nets] self.assertTrue('192.168.0.0/24' in returned_cidrs) self.assertTrue('192.168.1.0/24' in returned_cidrs) def test_validate_cidrs_conflict_existing_supernet(self): manager = fake_network.FakeNetworkManager() self.mox.StubOutWithMock(manager.db, 'network_get_all') ctxt = mox.IgnoreArg() fakecidr = [{'id': 1, 'cidr': '192.168.0.0/8'}] manager.db.network_get_all(ctxt).AndReturn(fakecidr) self.mox.ReplayAll() args = (None, 'fake', '192.168.0.0/24', False, 1, 256, None, None, None, None, None) # CidrConflict: requested cidr (192.168.0.0/24) conflicts # with existing supernet self.assertRaises(exception.CidrConflict, manager.create_networks, *args) def test_create_networks(self): cidr = '192.168.0.0/24' manager = fake_network.FakeNetworkManager() self.stubs.Set(manager, '_create_fixed_ips', self.fake_create_fixed_ips) args = [None, 'foo', cidr, None, 1, 256, 'fd00::/48', None, None, None, None, None] self.assertTrue(manager.create_networks(*args)) def test_create_networks_cidr_already_used(self): manager = fake_network.FakeNetworkManager() self.mox.StubOutWithMock(manager.db, 'network_get_all') ctxt = mox.IgnoreArg() fakecidr = [{'id': 1, 'cidr': '192.168.0.0/24'}] manager.db.network_get_all(ctxt).AndReturn(fakecidr) self.mox.ReplayAll() args = [None, 'foo', '192.168.0.0/24', None, 1, 256, 'fd00::/48', None, None, None, None, None] self.assertRaises(exception.CidrConflict, manager.create_networks, *args) def test_create_networks_many(self): cidr = '192.168.0.0/16' manager = fake_network.FakeNetworkManager() self.stubs.Set(manager, '_create_fixed_ips', self.fake_create_fixed_ips) args = [None, 'foo', cidr, None, 10, 256, 'fd00::/48', None, None, None, None, None] self.assertTrue(manager.create_networks(*args)) def test_get_instance_uuids_by_ip_regex(self): manager = fake_network.FakeNetworkManager() _vifs = 
manager.db.virtual_interface_get_all(None) fake_context = context.RequestContext('user', 'project') # Greedy get eveything res = manager.get_instance_uuids_by_ip_filter(fake_context, {'ip': '.*'}) self.assertEqual(len(res), len(_vifs)) # Doesn't exist res = manager.get_instance_uuids_by_ip_filter(fake_context, {'ip': '10.0.0.1'}) self.assertFalse(res) # Get instance 1 res = manager.get_instance_uuids_by_ip_filter(fake_context, {'ip': '172.16.0.2'}) self.assertTrue(res) self.assertEqual(len(res), 1) self.assertEqual(res[0]['instance_uuid'], _vifs[1]['instance_uuid']) # Get instance 2 res = manager.get_instance_uuids_by_ip_filter(fake_context, {'ip': '173.16.0.2'}) self.assertTrue(res) self.assertEqual(len(res), 1) self.assertEqual(res[0]['instance_uuid'], _vifs[2]['instance_uuid']) # Get instance 0 and 1 res = manager.get_instance_uuids_by_ip_filter(fake_context, {'ip': '172.16.0.*'}) self.assertTrue(res) self.assertEqual(len(res), 2) self.assertEqual(res[0]['instance_uuid'], _vifs[0]['instance_uuid']) self.assertEqual(res[1]['instance_uuid'], _vifs[1]['instance_uuid']) # Get instance 1 and 2 res = manager.get_instance_uuids_by_ip_filter(fake_context, {'ip': '17..16.0.2'}) self.assertTrue(res) self.assertEqual(len(res), 2) self.assertEqual(res[0]['instance_uuid'], _vifs[1]['instance_uuid']) self.assertEqual(res[1]['instance_uuid'], _vifs[2]['instance_uuid']) def test_get_instance_uuids_by_ipv6_regex(self): manager = fake_network.FakeNetworkManager() _vifs = manager.db.virtual_interface_get_all(None) fake_context = context.RequestContext('user', 'project') # Greedy get eveything res = manager.get_instance_uuids_by_ip_filter(fake_context, {'ip6': '.*'}) self.assertEqual(len(res), len(_vifs)) # Doesn't exist res = manager.get_instance_uuids_by_ip_filter(fake_context, {'ip6': '.*1034.*'}) self.assertFalse(res) # Get instance 1 res = manager.get_instance_uuids_by_ip_filter(fake_context, {'ip6': '2001:.*2'}) self.assertTrue(res) self.assertEqual(len(res), 1) self.assertEqual(res[0]['instance_uuid'], _vifs[1]['instance_uuid']) # Get instance 2 ip6 = '2001:db8:69:1f:dead:beff:feff:ef03' res = manager.get_instance_uuids_by_ip_filter(fake_context, {'ip6': ip6}) self.assertTrue(res) self.assertEqual(len(res), 1) self.assertEqual(res[0]['instance_uuid'], _vifs[2]['instance_uuid']) # Get instance 0 and 1 res = manager.get_instance_uuids_by_ip_filter(fake_context, {'ip6': '.*ef0[1,2]'}) self.assertTrue(res) self.assertEqual(len(res), 2) self.assertEqual(res[0]['instance_uuid'], _vifs[0]['instance_uuid']) self.assertEqual(res[1]['instance_uuid'], _vifs[1]['instance_uuid']) # Get instance 1 and 2 ip6 = '2001:db8:69:1.:dead:beff:feff:ef0.' res = manager.get_instance_uuids_by_ip_filter(fake_context, {'ip6': ip6}) self.assertTrue(res) self.assertEqual(len(res), 2) self.assertEqual(res[0]['instance_uuid'], _vifs[1]['instance_uuid']) self.assertEqual(res[1]['instance_uuid'], _vifs[2]['instance_uuid']) def test_get_instance_uuids_by_ip(self): manager = fake_network.FakeNetworkManager() _vifs = manager.db.virtual_interface_get_all(None) fake_context = context.RequestContext('user', 'project') # No regex for you! 
res = manager.get_instance_uuids_by_ip_filter(fake_context, {'fixed_ip': '.*'}) self.assertFalse(res) # Doesn't exist ip = '10.0.0.1' res = manager.get_instance_uuids_by_ip_filter(fake_context, {'fixed_ip': ip}) self.assertFalse(res) # Get instance 1 ip = '172.16.0.2' res = manager.get_instance_uuids_by_ip_filter(fake_context, {'fixed_ip': ip}) self.assertTrue(res) self.assertEqual(len(res), 1) self.assertEqual(res[0]['instance_uuid'], _vifs[1]['instance_uuid']) # Get instance 2 ip = '173.16.0.2' res = manager.get_instance_uuids_by_ip_filter(fake_context, {'fixed_ip': ip}) self.assertTrue(res) self.assertEqual(len(res), 1) self.assertEqual(res[0]['instance_uuid'], _vifs[2]['instance_uuid']) def test_get_network(self): manager = fake_network.FakeNetworkManager() fake_context = context.RequestContext('user', 'project') self.mox.StubOutWithMock(manager.db, 'network_get_by_uuid') manager.db.network_get_by_uuid(mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(networks[0]) self.mox.ReplayAll() uuid = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' network = manager.get_network(fake_context, uuid) self.assertEqual(network['uuid'], uuid) def test_get_network_not_found(self): manager = fake_network.FakeNetworkManager() fake_context = context.RequestContext('user', 'project') self.mox.StubOutWithMock(manager.db, 'network_get_by_uuid') manager.db.network_get_by_uuid( mox.IgnoreArg(), mox.IgnoreArg()).AndRaise( exception.NetworkNotFoundForUUID(uuid='fake') ) self.mox.ReplayAll() uuid = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee' self.assertRaises(exception.NetworkNotFound, manager.get_network, fake_context, uuid) def test_get_all_networks(self): manager = fake_network.FakeNetworkManager() fake_context = context.RequestContext('user', 'project') self.mox.StubOutWithMock(manager.db, 'network_get_all') manager.db.network_get_all(mox.IgnoreArg()).AndReturn(networks) self.mox.ReplayAll() output = manager.get_all_networks(fake_context) self.assertEqual(len(networks), 2) self.assertEqual(output[0]['uuid'], 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa') self.assertEqual(output[1]['uuid'], 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb') def test_disassociate_network(self): manager = fake_network.FakeNetworkManager() fake_context = context.RequestContext('user', 'project') self.mox.StubOutWithMock(manager.db, 'network_get_by_uuid') manager.db.network_get_by_uuid(mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(networks[0]) self.mox.ReplayAll() uuid = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' manager.disassociate_network(fake_context, uuid) def test_disassociate_network_not_found(self): manager = fake_network.FakeNetworkManager() fake_context = context.RequestContext('user', 'project') self.mox.StubOutWithMock(manager.db, 'network_get_by_uuid') manager.db.network_get_by_uuid( mox.IgnoreArg(), mox.IgnoreArg()).AndRaise( exception.NetworkNotFoundForUUID(uuid='fake') ) self.mox.ReplayAll() uuid = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee' self.assertRaises(exception.NetworkNotFound, manager.disassociate_network, fake_context, uuid) def _test_init_host_static_fixed_range(self, net_manager): self.flags(fake_network=True, fixed_range='10.0.0.0/22', routing_source_ip='192.168.0.1', metadata_host='192.168.0.1', public_interface='eth1', dmz_cidr=['10.0.3.0/24']) binary_name = linux_net.get_binary_name() # Stub out calls we don't want to really run self.stubs.Set(linux_net.iptables_manager, '_apply', lambda: None) self.stubs.Set(floating_ips.FloatingIP, 'init_host_floating_ips', lambda *args: None) # Call the network manager init code to configure the fixed_range 
net_manager.init_host() # Get the iptables rules that got created current_lines = [] new_lines = linux_net.iptables_manager._modify_rules(current_lines, linux_net.iptables_manager.ipv4['nat'], table_name='nat') # The expected rules that should be configured based on the fixed_range expected_lines = ['[0:0] -A %s-snat -s %s -d 0.0.0.0/0 ' '-j SNAT --to-source %s -o %s' % (binary_name, CONF.fixed_range, CONF.routing_source_ip, CONF.public_interface), '[0:0] -A %s-POSTROUTING -s %s -d %s/32 -j ACCEPT' % (binary_name, CONF.fixed_range, CONF.metadata_host), '[0:0] -A %s-POSTROUTING -s %s -d %s -j ACCEPT' % (binary_name, CONF.fixed_range, CONF.dmz_cidr[0]), '[0:0] -A %s-POSTROUTING -s %s -d %s -m conntrack ! ' '--ctstate DNAT -j ACCEPT' % (binary_name, CONF.fixed_range, CONF.fixed_range)] # Finally, compare the expected rules against the actual ones for line in expected_lines: self.assertTrue(line in new_lines) def _test_init_host_dynamic_fixed_range(self, net_manager): self.flags(fake_network=True, fixed_range='', routing_source_ip='172.16.0.1', metadata_host='172.16.0.1', public_interface='eth1', dmz_cidr=['10.0.3.0/24']) binary_name = linux_net.get_binary_name() # Stub out calls we don't want to really run, mock the db self.stubs.Set(linux_net.iptables_manager, '_apply', lambda: None) self.stubs.Set(floating_ips.FloatingIP, 'init_host_floating_ips', lambda *args: None) self.stubs.Set(net_manager.l3driver, 'initialize_gateway', lambda *args: None) self.mox.StubOutWithMock(db, 'network_get_all_by_host') db.network_get_all_by_host(mox.IgnoreArg(), mox.IgnoreArg()).MultipleTimes().AndReturn(networks) self.mox.ReplayAll() # Call the network manager init code to configure the fixed_range net_manager.init_host() # Get the iptables rules that got created current_lines = [] new_lines = linux_net.iptables_manager._modify_rules(current_lines, linux_net.iptables_manager.ipv4['nat'], table_name='nat') # The expected rules that should be configured based on the fixed_range expected_lines = ['[0:0] -A %s-snat -s %s -d 0.0.0.0/0 ' '-j SNAT --to-source %s -o %s' % (binary_name, networks[0]['cidr'], CONF.routing_source_ip, CONF.public_interface), '[0:0] -A %s-POSTROUTING -s %s -d %s/32 -j ACCEPT' % (binary_name, networks[0]['cidr'], CONF.metadata_host), '[0:0] -A %s-POSTROUTING -s %s -d %s -j ACCEPT' % (binary_name, networks[0]['cidr'], CONF.dmz_cidr[0]), '[0:0] -A %s-POSTROUTING -s %s -d %s -m conntrack ! ' '--ctstate DNAT -j ACCEPT' % (binary_name, networks[0]['cidr'], networks[0]['cidr']), '[0:0] -A %s-snat -s %s -d 0.0.0.0/0 ' '-j SNAT --to-source %s -o %s' % (binary_name, networks[1]['cidr'], CONF.routing_source_ip, CONF.public_interface), '[0:0] -A %s-POSTROUTING -s %s -d %s/32 -j ACCEPT' % (binary_name, networks[1]['cidr'], CONF.metadata_host), '[0:0] -A %s-POSTROUTING -s %s -d %s -j ACCEPT' % (binary_name, networks[1]['cidr'], CONF.dmz_cidr[0]), '[0:0] -A %s-POSTROUTING -s %s -d %s -m conntrack ! 
' '--ctstate DNAT -j ACCEPT' % (binary_name, networks[1]['cidr'], networks[1]['cidr'])] # Compare the expected rules against the actual ones for line in expected_lines: self.assertTrue(line in new_lines) # Add an additional network and ensure the rules get configured new_network = {'id': 2, 'uuid': 'cccccccc-cccc-cccc-cccc-cccccccc', 'label': 'test2', 'injected': False, 'multi_host': False, 'cidr': '192.168.2.0/24', 'cidr_v6': '2001:dba::/64', 'gateway_v6': '2001:dba::1', 'netmask_v6': '64', 'netmask': '255.255.255.0', 'bridge': 'fa1', 'bridge_interface': 'fake_fa1', 'gateway': '192.168.2.1', 'broadcast': '192.168.2.255', 'dns1': '192.168.2.1', 'dns2': '192.168.2.2', 'vlan': None, 'host': HOST, 'project_id': 'fake_project', 'vpn_public_address': '192.168.2.2', 'vpn_public_port': '22', 'vpn_private_address': '10.0.0.2'} # Call the network manager init code to configure the fixed_range ctxt = context.get_admin_context() net_manager._setup_network_on_host(ctxt, new_network) # Get the new iptables rules that got created from adding a new network current_lines = [] new_lines = linux_net.iptables_manager._modify_rules(current_lines, linux_net.iptables_manager.ipv4['nat'], table_name='nat') # Add the new expected rules to the old ones expected_lines += ['[0:0] -A %s-snat -s %s -d 0.0.0.0/0 ' '-j SNAT --to-source %s -o %s' % (binary_name, new_network['cidr'], CONF.routing_source_ip, CONF.public_interface), '[0:0] -A %s-POSTROUTING -s %s -d %s/32 -j ACCEPT' % (binary_name, new_network['cidr'], CONF.metadata_host), '[0:0] -A %s-POSTROUTING -s %s -d %s -j ACCEPT' % (binary_name, new_network['cidr'], CONF.dmz_cidr[0]), '[0:0] -A %s-POSTROUTING -s %s -d %s -m conntrack ' '! --ctstate DNAT -j ACCEPT' % (binary_name, new_network['cidr'], new_network['cidr'])] # Compare the expected rules (with new network) against the actual ones for line in expected_lines: self.assertTrue(line in new_lines) def test_flatdhcpmanager_static_fixed_range(self): """Test FlatDHCPManager NAT rules for fixed_range.""" # Set the network manager self.network = network_manager.FlatDHCPManager(host=HOST) self.network.db = db # Test existing behavior: # CONF.fixed_range is set, NAT based on CONF.fixed_range self._test_init_host_static_fixed_range(self.network) def test_flatdhcpmanager_dynamic_fixed_range(self): """Test FlatDHCPManager NAT rules for fixed_range.""" # Set the network manager self.network = network_manager.FlatDHCPManager(host=HOST) self.network.db = db # Test new behavior: # CONF.fixed_range is not set, defaults to None # Determine networks to NAT based on lookup self._test_init_host_dynamic_fixed_range(self.network) def test_vlanmanager_static_fixed_range(self): """Test VlanManager NAT rules for fixed_range.""" # Set the network manager self.network = network_manager.VlanManager(host=HOST) self.network.db = db # Test existing behavior: # CONF.fixed_range is set, NAT based on CONF.fixed_range self._test_init_host_static_fixed_range(self.network) def test_vlanmanager_dynamic_fixed_range(self): """Test VlanManager NAT rules for fixed_range.""" # Set the network manager self.network = network_manager.VlanManager(host=HOST) self.network.db = db # Test new behavior: # CONF.fixed_range is not set, defaults to None # Determine networks to NAT based on lookup self._test_init_host_dynamic_fixed_range(self.network) class TestRPCFixedManager(network_manager.RPCAllocateFixedIP, network_manager.NetworkManager): """Dummy manager that implements RPCAllocateFixedIP.""" class RPCAllocateTestCase(test.TestCase): """Tests 
nova.network.manager.RPCAllocateFixedIP.""" def setUp(self): super(RPCAllocateTestCase, self).setUp() self.rpc_fixed = TestRPCFixedManager() self.context = context.RequestContext('fake', 'fake') def test_rpc_allocate(self): """Test to verify bug 855030 doesn't resurface. Makes sure _rpc_allocate_fixed_ip returns a value so the call returns properly and the greenpool completes. """ address = '10.10.10.10' def fake_allocate(*args, **kwargs): return address def fake_network_get(*args, **kwargs): return {} self.stubs.Set(self.rpc_fixed, 'allocate_fixed_ip', fake_allocate) self.stubs.Set(self.rpc_fixed.db, 'network_get', fake_network_get) rval = self.rpc_fixed._rpc_allocate_fixed_ip(self.context, 'fake_instance', 'fake_network') self.assertEqual(rval, address) class TestFloatingIPManager(floating_ips.FloatingIP, network_manager.NetworkManager): """Dummy manager that implements FloatingIP.""" class AllocateTestCase(test.TestCase): def setUp(self): super(AllocateTestCase, self).setUp() self.useFixture(test.SampleNetworks()) self.conductor = self.start_service( 'conductor', manager=CONF.conductor.manager) self.compute = self.start_service('compute') self.network = self.start_service('network') self.user_id = 'fake' self.project_id = 'fake' self.context = context.RequestContext(self.user_id, self.project_id, is_admin=True) def test_allocate_for_instance(self): address = "10.10.10.10" self.flags(auto_assign_floating_ip=True) db.floating_ip_create(self.context, {'address': address, 'pool': 'nova'}) inst = db.instance_create(self.context, {'host': self.compute.host, 'display_name': HOST, 'instance_type_id': 1}) networks = db.network_get_all(self.context) for network in networks: db.network_update(self.context, network['id'], {'host': self.network.host}) project_id = self.context.project_id nw_info = self.network.allocate_for_instance(self.context, instance_id=inst['id'], instance_uuid=inst['uuid'], host=inst['host'], vpn=None, rxtx_factor=3, project_id=project_id, macs=None) self.assertEquals(1, len(nw_info)) fixed_ip = nw_info.fixed_ips()[0]['address'] self.assertTrue(utils.is_valid_ipv4(fixed_ip)) self.network.deallocate_for_instance(self.context, instance_id=inst['id'], fixed_ips=fixed_ip, host=self.network.host, project_id=project_id) def test_allocate_for_instance_with_mac(self): available_macs = set(['ca:fe:de:ad:be:ef']) inst = db.instance_create(self.context, {'host': self.compute.host, 'display_name': HOST, 'instance_type_id': 1}) networks = db.network_get_all(self.context) for network in networks: db.network_update(self.context, network['id'], {'host': self.network.host}) project_id = self.context.project_id nw_info = self.network.allocate_for_instance(self.context, instance_id=inst['id'], instance_uuid=inst['uuid'], host=inst['host'], vpn=None, rxtx_factor=3, project_id=project_id, macs=available_macs) assigned_macs = [vif['address'] for vif in nw_info] self.assertEquals(1, len(assigned_macs)) self.assertEquals(available_macs.pop(), assigned_macs[0]) self.network.deallocate_for_instance(self.context, instance_id=inst['id'], host=self.network.host, project_id=project_id) def test_allocate_for_instance_not_enough_macs(self): available_macs = set() inst = db.instance_create(self.context, {'host': self.compute.host, 'display_name': HOST, 'instance_type_id': 1}) networks = db.network_get_all(self.context) for network in networks: db.network_update(self.context, network['id'], {'host': self.network.host}) project_id = self.context.project_id
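# NOTE(editor): test_rpc_allocate above exists because _rpc_allocate_fixed_ip
# must return a value for the caller blocked on its green thread to complete
# (bug 855030). The snippet below is a standalone sketch using eventlet, which
# nova's services run on; `_fake_allocate` is illustrative only.
from eventlet import greenpool


def _fake_allocate(address):
    # If this returned nothing, the waiter below would only ever see None.
    return address


pool = greenpool.GreenPool()
thread = pool.spawn(_fake_allocate, '10.10.10.10')
# wait() blocks until the green thread finishes and hands back its result.
assert thread.wait() == '10.10.10.10'
pool.waitall()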
self.assertRaises(exception.VirtualInterfaceCreateException, self.network.allocate_for_instance, self.context, instance_id=inst['id'], instance_uuid=inst['uuid'], host=inst['host'], vpn=None, rxtx_factor=3, project_id=project_id, macs=available_macs) class FloatingIPTestCase(test.TestCase): """Tests nova.network.manager.FloatingIP.""" def setUp(self): super(FloatingIPTestCase, self).setUp() self.tempdir = self.useFixture(fixtures.TempDir()).path self.flags(log_dir=self.tempdir) self.network = TestFloatingIPManager() self.network.db = db self.project_id = 'testproject' self.context = context.RequestContext('testuser', self.project_id, is_admin=False) def test_disassociate_floating_ip_multi_host_calls(self): floating_ip = { 'fixed_ip_id': 12 } fixed_ip = { 'network_id': None, 'instance_uuid': 'instance-uuid' } network = { 'multi_host': True } instance = { 'host': 'some-other-host' } ctxt = context.RequestContext('testuser', 'testproject', is_admin=False) self.stubs.Set(self.network.db, 'floating_ip_get_by_address', lambda _x, _y: floating_ip) self.stubs.Set(self.network, '_floating_ip_owned_by_project', lambda _x, _y: True) self.stubs.Set(self.network.db, 'fixed_ip_get', lambda _x, _y: fixed_ip) self.stubs.Set(self.network.db, 'network_get', lambda _x, _y: network) self.stubs.Set(self.network.db, 'instance_get_by_uuid', lambda _x, _y: instance) self.stubs.Set(self.network.db, 'service_get_by_host_and_topic', lambda _x, _y, _z: 'service') self.stubs.Set(self.network.servicegroup_api, 'service_is_up', lambda _x: True) self.mox.StubOutWithMock( self.network.network_rpcapi, '_disassociate_floating_ip') self.network.network_rpcapi._disassociate_floating_ip( ctxt, 'fl_ip', mox.IgnoreArg(), 'some-other-host', 'instance-uuid') self.mox.ReplayAll() self.network.disassociate_floating_ip(ctxt, 'fl_ip', True) def test_associate_floating_ip_multi_host_calls(self): floating_ip = { 'fixed_ip_id': None } fixed_ip = { 'network_id': None, 'instance_uuid': 'instance-uuid' } network = { 'multi_host': True } instance = { 'host': 'some-other-host' } ctxt = context.RequestContext('testuser', 'testproject', is_admin=False) self.stubs.Set(self.network.db, 'floating_ip_get_by_address', lambda _x, _y: floating_ip) self.stubs.Set(self.network, '_floating_ip_owned_by_project', lambda _x, _y: True) self.stubs.Set(self.network.db, 'fixed_ip_get_by_address', lambda _x, _y: fixed_ip) self.stubs.Set(self.network.db, 'network_get', lambda _x, _y: network) self.stubs.Set(self.network.db, 'instance_get_by_uuid', lambda _x, _y: instance) self.mox.StubOutWithMock( self.network.network_rpcapi, '_associate_floating_ip') self.network.network_rpcapi._associate_floating_ip( ctxt, 'fl_ip', 'fix_ip', mox.IgnoreArg(), 'some-other-host', 'instance-uuid') self.mox.ReplayAll() self.network.associate_floating_ip(ctxt, 'fl_ip', 'fix_ip', True) def test_double_deallocation(self): instance_ref = db.instance_create(self.context, {"project_id": self.project_id}) # Run it twice to make it fault if it does not handle # instances without fixed networks # If this fails in either, it does not handle having no addresses self.network.deallocate_for_instance(self.context, instance_id=instance_ref['id']) self.network.deallocate_for_instance(self.context, instance_id=instance_ref['id']) def test_deallocation_deleted_instance(self): self.stubs.Set(self.network, '_teardown_network_on_host', lambda *args, **kwargs: None) instance = db.instance_create(self.context, { 'project_id': self.project_id, 'deleted': True}) network = 
db.network_create_safe(self.context.elevated(), { 'project_id': self.project_id}) fixed = db.fixed_ip_create(self.context, {'allocated': True, 'instance_uuid': instance['uuid'], 'address': '10.1.1.1', 'network_id': network['id']}) db.floating_ip_create(self.context, { 'address': '10.10.10.10', 'instance_uuid': instance['uuid'], 'fixed_ip_id': fixed['id'], 'project_id': self.project_id}) self.network.deallocate_for_instance(self.context, instance_id=instance['uuid']) def test_deallocation_duplicate_floating_ip(self): self.stubs.Set(self.network, '_teardown_network_on_host', lambda *args, **kwargs: None) instance = db.instance_create(self.context, { 'project_id': self.project_id}) network = db.network_create_safe(self.context.elevated(), { 'project_id': self.project_id}) fixed = db.fixed_ip_create(self.context, {'allocated': True, 'instance_uuid': instance['uuid'], 'address': '10.1.1.1', 'network_id': network['id']}) db.floating_ip_create(self.context, { 'address': '10.10.10.10', 'deleted': True}) db.floating_ip_create(self.context, { 'address': '10.10.10.10', 'instance_uuid': instance['uuid'], 'fixed_ip_id': fixed['id'], 'project_id': self.project_id}) self.network.deallocate_for_instance(self.context, instance_id=instance['uuid']) def test_migrate_instance_start(self): called = {'count': 0} def fake_floating_ip_get_by_address(context, address): return {'address': address, 'fixed_ip_id': 0} def fake_is_stale_floating_ip_address(context, floating_ip): return floating_ip['address'] == '172.24.4.23' def fake_fixed_ip_get(context, fixed_ip_id, get_network): return {'instance_uuid': 'fake_uuid', 'address': '10.0.0.2', 'network': 'fakenet'} def fake_remove_floating_ip(floating_addr, fixed_addr, interface, network): called['count'] += 1 def fake_clean_conntrack(fixed_ip): if not fixed_ip == "10.0.0.2": raise exception.FixedIpInvalid(address=fixed_ip) def fake_floating_ip_update(context, address, args): pass self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake_floating_ip_get_by_address) self.stubs.Set(self.network, '_is_stale_floating_ip_address', fake_is_stale_floating_ip_address) self.stubs.Set(self.network.db, 'fixed_ip_get', fake_fixed_ip_get) self.stubs.Set(self.network.db, 'floating_ip_update', fake_floating_ip_update) self.stubs.Set(self.network.l3driver, 'remove_floating_ip', fake_remove_floating_ip) self.stubs.Set(self.network.l3driver, 'clean_conntrack', fake_clean_conntrack) self.mox.ReplayAll() addresses = ['172.24.4.23', '172.24.4.24', '172.24.4.25'] self.network.migrate_instance_start(self.context, instance_uuid=FAKEUUID, floating_addresses=addresses, rxtx_factor=3, project_id=self.project_id, source='fake_source', dest='fake_dest') self.assertEqual(called['count'], 2) def test_migrate_instance_finish(self): called = {'count': 0} def fake_floating_ip_get_by_address(context, address): return {'address': address, 'fixed_ip_id': 0} def fake_is_stale_floating_ip_address(context, floating_ip): return floating_ip['address'] == '172.24.4.23' def fake_fixed_ip_get(context, fixed_ip_id, get_network): return {'instance_uuid': 'fake_uuid', 'address': '10.0.0.2', 'network': 'fakenet'} def fake_add_floating_ip(floating_addr, fixed_addr, interface, network): called['count'] += 1 def fake_floating_ip_update(context, address, args): pass self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake_floating_ip_get_by_address) self.stubs.Set(self.network, '_is_stale_floating_ip_address', fake_is_stale_floating_ip_address) self.stubs.Set(self.network.db, 'fixed_ip_get', 
                       fake_fixed_ip_get)
        self.stubs.Set(self.network.db, 'floating_ip_update',
                       fake_floating_ip_update)
        self.stubs.Set(self.network.l3driver, 'add_floating_ip',
                       fake_add_floating_ip)
        self.mox.ReplayAll()
        addresses = ['172.24.4.23', '172.24.4.24', '172.24.4.25']
        self.network.migrate_instance_finish(self.context,
                                             instance_uuid=FAKEUUID,
                                             floating_addresses=addresses,
                                             host='fake_dest',
                                             rxtx_factor=3,
                                             project_id=self.project_id,
                                             source='fake_source')
        self.assertEqual(called['count'], 2)

    def test_floating_dns_create_conflict(self):
        zone = "example.org"
        address1 = "10.10.10.11"
        name1 = "foo"

        self.network.add_dns_entry(self.context, address1, name1, "A", zone)

        self.assertRaises(exception.FloatingIpDNSExists,
                          self.network.add_dns_entry, self.context,
                          address1, name1, "A", zone)

    def test_floating_create_and_get(self):
        zone = "example.org"
        address1 = "10.10.10.11"
        name1 = "foo"
        name2 = "bar"
        entries = self.network.get_dns_entries_by_address(self.context,
                                                          address1, zone)
        self.assertFalse(entries)

        self.network.add_dns_entry(self.context, address1, name1, "A", zone)
        self.network.add_dns_entry(self.context, address1, name2, "A", zone)
        entries = self.network.get_dns_entries_by_address(self.context,
                                                          address1, zone)
        self.assertEquals(len(entries), 2)
        self.assertEquals(entries[0], name1)
        self.assertEquals(entries[1], name2)

        entries = self.network.get_dns_entries_by_name(self.context,
                                                       name1, zone)
        self.assertEquals(len(entries), 1)
        self.assertEquals(entries[0], address1)

    def test_floating_dns_delete(self):
        zone = "example.org"
        address1 = "10.10.10.11"
        name1 = "foo"
        name2 = "bar"

        self.network.add_dns_entry(self.context, address1, name1, "A", zone)
        self.network.add_dns_entry(self.context, address1, name2, "A", zone)
        self.network.delete_dns_entry(self.context, name1, zone)

        entries = self.network.get_dns_entries_by_address(self.context,
                                                          address1, zone)
        self.assertEquals(len(entries), 1)
        self.assertEquals(entries[0], name2)

        self.assertRaises(exception.NotFound,
                          self.network.delete_dns_entry, self.context,
                          name1, zone)

    def test_floating_dns_domains_public(self):
        zone1 = "testzone"
        domain1 = "example.org"
        domain2 = "example.com"
        address1 = '10.10.10.10'
        entryname = 'testentry'

        context_admin = context.RequestContext('testuser', 'testproject',
                                               is_admin=True)

        self.assertRaises(exception.AdminRequired,
                          self.network.create_public_dns_domain, self.context,
                          domain1, zone1)
        self.network.create_public_dns_domain(context_admin, domain1,
                                              'testproject')
        self.network.create_public_dns_domain(context_admin, domain2,
                                              'fakeproject')

        domains = self.network.get_dns_domains(self.context)
        self.assertEquals(len(domains), 2)
        self.assertEquals(domains[0]['domain'], domain1)
        self.assertEquals(domains[1]['domain'], domain2)
        self.assertEquals(domains[0]['project'], 'testproject')
        self.assertEquals(domains[1]['project'], 'fakeproject')

        self.network.add_dns_entry(self.context, address1, entryname,
                                   'A', domain1)
        entries = self.network.get_dns_entries_by_name(self.context,
                                                       entryname, domain1)
        self.assertEquals(len(entries), 1)
        self.assertEquals(entries[0], address1)

        self.assertRaises(exception.AdminRequired,
                          self.network.delete_dns_domain, self.context,
                          domain1)
        self.network.delete_dns_domain(context_admin, domain1)
        self.network.delete_dns_domain(context_admin, domain2)

        # Verify that deleting the domain deleted the associated entry
        entries = self.network.get_dns_entries_by_name(self.context,
                                                       entryname, domain1)
        self.assertFalse(entries)

    def test_delete_all_by_ip(self):
        domain1 = "example.org"
        domain2 = "example.com"
        address = "10.10.10.10"
        name1 = "foo"
        name2 = "bar"

        def fake_domains(context):
            return [{'domain': 'example.org', 'scope': 'public'},
                    {'domain': 'example.com', 'scope': 'public'},
                    {'domain': 'test.example.org', 'scope': 'public'}]

        self.stubs.Set(self.network, 'get_dns_domains', fake_domains)

        context_admin = context.RequestContext('testuser', 'testproject',
                                               is_admin=True)

        self.network.create_public_dns_domain(context_admin, domain1,
                                              'testproject')
        self.network.create_public_dns_domain(context_admin, domain2,
                                              'fakeproject')

        domains = self.network.get_dns_domains(self.context)
        for domain in domains:
            self.network.add_dns_entry(self.context, address, name1, "A",
                                       domain['domain'])
            self.network.add_dns_entry(self.context, address, name2, "A",
                                       domain['domain'])
            entries = self.network.get_dns_entries_by_address(self.context,
                                                              address,
                                                              domain['domain'])
            self.assertEquals(len(entries), 2)

        self.network._delete_all_entries_for_ip(self.context, address)

        for domain in domains:
            entries = self.network.get_dns_entries_by_address(self.context,
                                                              address,
                                                              domain['domain'])
            self.assertFalse(entries)

        self.network.delete_dns_domain(context_admin, domain1)
        self.network.delete_dns_domain(context_admin, domain2)

    def test_mac_conflicts(self):
        # Make sure MAC collisions are retried.
        self.flags(create_unique_mac_address_attempts=3)
        ctxt = context.RequestContext('testuser', 'testproject',
                                      is_admin=True)
        macs = ['bb:bb:bb:bb:bb:bb', 'aa:aa:aa:aa:aa:aa']

        # Create a VIF with aa:aa:aa:aa:aa:aa
        crash_test_dummy_vif = {
            'address': macs[1],
            'instance_uuid': 'fake_uuid',
            'network_id': 'fake_net',
            'uuid': 'fake_uuid',
            }
        self.network.db.virtual_interface_create(ctxt, crash_test_dummy_vif)

        # Hand out a collision first, then a legit MAC
        def fake_gen_mac():
            return macs.pop()
        self.stubs.Set(utils, 'generate_mac_address', fake_gen_mac)

        # SQLite doesn't seem to honor the uniqueness constraint on the
        # address column, so fake the collision-avoidance here
        def fake_vif_save(vif):
            if vif.address == crash_test_dummy_vif['address']:
                raise db_exc.DBError("If you're smart, you'll retry!")
        self.stubs.Set(models.VirtualInterface, 'save', fake_vif_save)

        # Attempt to add another and make sure that both MACs are consumed
        # by the retry loop
        self.network._add_virtual_interface(ctxt, 'fake_uuid', 'fake_net')
        self.assertEqual(macs, [])

    def test_deallocate_client_exceptions(self):
        # Ensure that FloatingIpNotFoundForAddress is wrapped.
        self.mox.StubOutWithMock(self.network.db,
                                 'floating_ip_get_by_address')
        self.network.db.floating_ip_get_by_address(
            self.context, '1.2.3.4').AndRaise(
                exception.FloatingIpNotFoundForAddress(address='fake'))
        self.mox.ReplayAll()
        self.assertRaises(rpc_common.ClientException,
                          self.network.deallocate_floating_ip,
                          self.context, '1.2.3.4')

    def test_associate_client_exceptions(self):
        # Ensure that FloatingIpNotFoundForAddress is wrapped.
        self.mox.StubOutWithMock(self.network.db,
                                 'floating_ip_get_by_address')
        self.network.db.floating_ip_get_by_address(
            self.context, '1.2.3.4').AndRaise(
                exception.FloatingIpNotFoundForAddress(address='fake'))
        self.mox.ReplayAll()
        self.assertRaises(rpc_common.ClientException,
                          self.network.associate_floating_ip,
                          self.context, '1.2.3.4', '10.0.0.1')

    def test_disassociate_client_exceptions(self):
        # Ensure that FloatingIpNotFoundForAddress is wrapped.
        self.mox.StubOutWithMock(self.network.db,
                                 'floating_ip_get_by_address')
        self.network.db.floating_ip_get_by_address(
            self.context, '1.2.3.4').AndRaise(
                exception.FloatingIpNotFoundForAddress(address='fake'))
        self.mox.ReplayAll()
        self.assertRaises(rpc_common.ClientException,
                          self.network.disassociate_floating_ip,
                          self.context, '1.2.3.4')

    def test_get_floating_ip_client_exceptions(self):
        # Ensure that FloatingIpNotFoundForAddress is wrapped.
        self.mox.StubOutWithMock(self.network.db, 'floating_ip_get')
        self.network.db.floating_ip_get(self.context, 'fake-id').AndRaise(
            exception.FloatingIpNotFound(id='fake'))
        self.mox.ReplayAll()
        self.assertRaises(rpc_common.ClientException,
                          self.network.get_floating_ip,
                          self.context, 'fake-id')


class InstanceDNSTestCase(test.TestCase):
    """Tests nova.network.manager instance DNS."""
    def setUp(self):
        super(InstanceDNSTestCase, self).setUp()
        self.tempdir = self.useFixture(fixtures.TempDir()).path
        self.flags(log_dir=self.tempdir)
        self.network = TestFloatingIPManager()
        self.network.db = db
        self.project_id = 'testproject'
        self.context = context.RequestContext('testuser', self.project_id,
                                              is_admin=False)

    def test_dns_domains_private(self):
        zone1 = 'testzone'
        domain1 = 'example.org'

        context_admin = context.RequestContext('testuser', 'testproject',
                                               is_admin=True)

        self.assertRaises(exception.AdminRequired,
                          self.network.create_private_dns_domain,
                          self.context, domain1, zone1)

        self.network.create_private_dns_domain(context_admin, domain1, zone1)
        domains = self.network.get_dns_domains(self.context)
        self.assertEquals(len(domains), 1)
        self.assertEquals(domains[0]['domain'], domain1)
        self.assertEquals(domains[0]['availability_zone'], zone1)

        self.assertRaises(exception.AdminRequired,
                          self.network.delete_dns_domain, self.context,
                          domain1)
        self.network.delete_dns_domain(context_admin, domain1)


domain1 = "example.org"
domain2 = "example.com"


class LdapDNSTestCase(test.TestCase):
    """Tests nova.network.ldapdns.LdapDNS."""
    def setUp(self):
        super(LdapDNSTestCase, self).setUp()

        self.useFixture(test.ReplaceModule('ldap', fake_ldap))
        dns_class = 'nova.network.ldapdns.LdapDNS'
        self.driver = importutils.import_object(dns_class)

        attrs = {'objectClass': ['domainrelatedobject', 'dnsdomain',
                                 'domain', 'dcobject', 'top'],
                 'associateddomain': ['root'],
                 'dc': ['root']}
        self.driver.lobj.add_s("ou=hosts,dc=example,dc=org", attrs.items())

        self.driver.create_domain(domain1)
        self.driver.create_domain(domain2)

    def tearDown(self):
        self.driver.delete_domain(domain1)
        self.driver.delete_domain(domain2)
        super(LdapDNSTestCase, self).tearDown()

    def test_ldap_dns_domains(self):
        domains = self.driver.get_domains()
        self.assertEqual(len(domains), 2)
        self.assertIn(domain1, domains)
        self.assertIn(domain2, domains)

    def test_ldap_dns_create_conflict(self):
        address1 = "10.10.10.11"
        name1 = "foo"

        self.driver.create_entry(name1, address1, "A", domain1)

        self.assertRaises(exception.FloatingIpDNSExists,
                          self.driver.create_entry,
                          name1, address1, "A", domain1)

    def test_ldap_dns_create_and_get(self):
        address1 = "10.10.10.11"
        name1 = "foo"
        name2 = "bar"
        entries = self.driver.get_entries_by_address(address1, domain1)
        self.assertFalse(entries)

        self.driver.create_entry(name1, address1, "A", domain1)
        self.driver.create_entry(name2, address1, "A", domain1)
        entries = self.driver.get_entries_by_address(address1, domain1)
        self.assertEquals(len(entries), 2)
        self.assertEquals(entries[0], name1)
        self.assertEquals(entries[1], name2)

        entries = self.driver.get_entries_by_name(name1, domain1)
        self.assertEquals(len(entries), 1)
        self.assertEquals(entries[0], address1)

    def test_ldap_dns_delete(self):
        address1 = "10.10.10.11"
        name1 = "foo"
        name2 = "bar"
        self.driver.create_entry(name1, address1, "A", domain1)
        self.driver.create_entry(name2, address1, "A", domain1)
        entries = self.driver.get_entries_by_address(address1, domain1)
        self.assertEquals(len(entries), 2)

        self.driver.delete_entry(name1, domain1)
        entries = self.driver.get_entries_by_address(address1, domain1)
        LOG.debug("entries: %s" % entries)
        self.assertEquals(len(entries), 1)
        self.assertEquals(entries[0], name2)

        self.assertRaises(exception.NotFound, self.driver.delete_entry,
                          name1, domain1)
plumgrid/plumgrid-nova
nova/tests/network/test_manager.py
Python
apache-2.0
115487
[ "FEFF" ]
f488df65309d335af36d20a44cac457543a477d641230e16fe5a0284f6411935
""" Student Views """ import datetime import logging import uuid import json import warnings from collections import defaultdict from pytz import UTC from requests import HTTPError from ipware.ip import get_ip from django.conf import settings from django.contrib.auth import logout, authenticate, login from django.contrib.auth.models import User, AnonymousUser from django.contrib.auth.decorators import login_required from django.contrib.auth.views import password_reset_confirm from django.contrib import messages from django.core.context_processors import csrf from django.core import mail from django.core.urlresolvers import reverse from django.core.validators import validate_email, ValidationError from django.db import IntegrityError, transaction from django.http import (HttpResponse, HttpResponseBadRequest, HttpResponseForbidden, HttpResponseServerError, Http404) from django.shortcuts import redirect from django.utils.translation import ungettext from django.utils.http import base36_to_int from django.utils.translation import ugettext as _, get_language from django.views.decorators.cache import never_cache from django.views.decorators.csrf import csrf_exempt, ensure_csrf_cookie from django.views.decorators.http import require_POST, require_GET from django.db.models.signals import post_save from django.dispatch import receiver from django.template.response import TemplateResponse from ratelimitbackend.exceptions import RateLimitException from social.apps.django_app import utils as social_utils from social.backends import oauth as social_oauth from social.exceptions import AuthException, AuthAlreadyAssociated from edxmako.shortcuts import render_to_response, render_to_string from course_modes.models import CourseMode from shoppingcart.api import order_history from student.models import ( Registration, UserProfile, PendingEmailChange, CourseEnrollment, CourseEnrollmentAttribute, unique_id_for_user, CourseEnrollmentAllowed, UserStanding, LoginFailures, create_comments_service_user, PasswordHistory, UserSignupSource, DashboardConfiguration, LinkedInAddToProfileConfiguration, ManualEnrollmentAudit, ALLOWEDTOENROLL_TO_ENROLLED) from student.forms import AccountCreationForm, PasswordResetFormNoActive from verify_student.models import SoftwareSecurePhotoVerification # pylint: disable=import-error from certificates.models import CertificateStatuses, certificate_status_for_student from certificates.api import ( # pylint: disable=import-error get_certificate_url, has_html_certificates_enabled, ) from xmodule.modulestore.django import modulestore from opaque_keys import InvalidKeyError from opaque_keys.edx.keys import CourseKey from opaque_keys.edx.locations import SlashSeparatedCourseKey from opaque_keys.edx.locator import CourseLocator from xmodule.modulestore import ModuleStoreEnum from collections import namedtuple from courseware.courses import get_courses, sort_by_announcement, sort_by_start_date # pylint: disable=import-error from courseware.access import has_access from django_comment_common.models import Role from external_auth.models import ExternalAuthMap import external_auth.views from external_auth.login_and_register import ( login as external_auth_login, register as external_auth_register ) from bulk_email.models import Optout, CourseAuthorization from lang_pref import LANGUAGE_KEY import track.views import dogstats_wrapper as dog_stats_api from util.db import commit_on_success_with_read_committed from util.json_request import JsonResponse from util.bad_request_rate_limiter import 
BadRequestRateLimiter from util.milestones_helpers import ( get_pre_requisite_courses_not_completed, ) from microsite_configuration import microsite from util.password_policy_validators import ( validate_password_length, validate_password_complexity, validate_password_dictionary ) import third_party_auth from third_party_auth import pipeline, provider from student.helpers import ( check_verify_status_by_course, auth_pipeline_urls, get_next_url_for_login_page ) from student.cookies import set_logged_in_cookies, delete_logged_in_cookies from student.models import anonymous_id_for_user from shoppingcart.models import DonationConfiguration, CourseRegistrationCode from embargo import api as embargo_api import analytics from eventtracking import tracker # Note that this lives in LMS, so this dependency should be refactored. from notification_prefs.views import enable_notifications # Note that this lives in openedx, so this dependency should be refactored. from openedx.core.djangoapps.user_api.preferences import api as preferences_api log = logging.getLogger("edx.student") AUDIT_LOG = logging.getLogger("audit") ReverifyInfo = namedtuple('ReverifyInfo', 'course_id course_name course_number date status display') # pylint: disable=invalid-name SETTING_CHANGE_INITIATED = 'edx.user.settings.change_initiated' def csrf_token(context): """A csrf token that can be included in a form.""" token = context.get('csrf_token', '') if token == 'NOTPROVIDED': return '' return (u'<div style="display:none"><input type="hidden"' ' name="csrfmiddlewaretoken" value="%s" /></div>' % (token)) # NOTE: This view is not linked to directly--it is called from # branding/views.py:index(), which is cached for anonymous users. # This means that it should always return the same thing for anon # users. (in particular, no switching based on query params allowed) def index(request, extra_context=None, user=AnonymousUser()): """ Render the edX main page. extra_context is used to allow immediate display of certain modal windows, eg signup, as used by external_auth. """ if extra_context is None: extra_context = {} # The course selection work is done in courseware.courses. domain = settings.FEATURES.get('FORCE_UNIVERSITY_DOMAIN') # normally False # do explicit check, because domain=None is valid if domain is False: domain = request.META.get('HTTP_HOST') courses = get_courses(user, domain=domain) if microsite.get_value("ENABLE_COURSE_SORTING_BY_START_DATE", settings.FEATURES["ENABLE_COURSE_SORTING_BY_START_DATE"]): courses = sort_by_start_date(courses) else: courses = sort_by_announcement(courses) context = {'courses': courses} context.update(extra_context) return render_to_response('index.html', context) def process_survey_link(survey_link, user): """ If {UNIQUE_ID} appears in the link, replace it with a unique id for the user. Currently, this is sha1(user.username). Otherwise, return survey_link. """ return survey_link.format(UNIQUE_ID=unique_id_for_user(user)) def cert_info(user, course_overview, course_mode): """ Get the certificate info needed to render the dashboard section for the given student and course. Arguments: user (User): A user. course_overview (CourseOverview): A course. course_mode (str): The enrollment mode (honor, verified, audit, etc.) 
Returns: dict: A dictionary with keys: 'status': one of 'generating', 'ready', 'notpassing', 'processing', 'restricted' 'show_download_url': bool 'download_url': url, only present if show_download_url is True 'show_disabled_download_button': bool -- true if state is 'generating' 'show_survey_button': bool 'survey_url': url, only if show_survey_button is True 'grade': if status is not 'processing' """ if not course_overview.may_certify(): return {} return _cert_info( user, course_overview, certificate_status_for_student(user, course_overview.id), course_mode ) def reverification_info(statuses): """ Returns reverification-related information for *all* of user's enrollments whose reverification status is in statuses. Args: statuses (list): a list of reverification statuses we want information for example: ["must_reverify", "denied"] Returns: dictionary of lists: dictionary with one key per status, e.g. dict["must_reverify"] = [] dict["must_reverify"] = [some information] """ reverifications = defaultdict(list) # Sort the data by the reverification_end_date for status in statuses: if reverifications[status]: reverifications[status].sort(key=lambda x: x.date) return reverifications def get_course_enrollments(user, org_to_include, orgs_to_exclude): """ Given a user, return a filtered set of his or her course enrollments. Arguments: user (User): the user in question. org_to_include (str): for use in Microsites. If not None, ONLY courses of this org will be returned. orgs_to_exclude (list[str]): If org_to_include is not None, this argument is ignored. Else, courses of this org will be excluded. Returns: generator[CourseEnrollment]: a sequence of enrollments to be displayed on the user's dashboard. """ for enrollment in CourseEnrollment.enrollments_for_user(user): # If the course is missing or broken, log an error and skip it. course_overview = enrollment.course_overview if not course_overview: log.error( "User %s enrolled in broken or non-existent course %s", user.username, enrollment.course_id ) continue # If we are in a Microsite, then filter out anything that is not # attributed (by ORG) to that Microsite. if org_to_include and course_overview.location.org != org_to_include: continue # Conversely, if we are not in a Microsite, then filter out any enrollments # with courses attributed (by ORG) to Microsites. elif course_overview.location.org in orgs_to_exclude: continue # Else, include the enrollment. else: yield enrollment def _cert_info(user, course_overview, cert_status, course_mode): # pylint: disable=unused-argument """ Implements the logic for cert_info -- split out for testing. Arguments: user (User): A user. course_overview (CourseOverview): A course. course_mode (str): The enrollment mode (honor, verified, audit, etc.) 
""" # simplify the status for the template using this lookup table template_state = { CertificateStatuses.generating: 'generating', CertificateStatuses.regenerating: 'generating', CertificateStatuses.downloadable: 'ready', CertificateStatuses.notpassing: 'notpassing', CertificateStatuses.restricted: 'restricted', } default_status = 'processing' default_info = {'status': default_status, 'show_disabled_download_button': False, 'show_download_url': False, 'show_survey_button': False, } if cert_status is None: return default_info is_hidden_status = cert_status['status'] in ('unavailable', 'processing', 'generating', 'notpassing') if course_overview.certificates_display_behavior == 'early_no_info' and is_hidden_status: return None status = template_state.get(cert_status['status'], default_status) status_dict = { 'status': status, 'show_download_url': status == 'ready', 'show_disabled_download_button': status == 'generating', 'mode': cert_status.get('mode', None), 'linked_in_url': None } if (status in ('generating', 'ready', 'notpassing', 'restricted') and course_overview.end_of_course_survey_url is not None): status_dict.update({ 'show_survey_button': True, 'survey_url': process_survey_link(course_overview.end_of_course_survey_url, user)}) else: status_dict['show_survey_button'] = False if status == 'ready': # showing the certificate web view button if certificate is ready state and feature flags are enabled. if has_html_certificates_enabled(course_overview.id, course_overview): if course_overview.has_any_active_web_certificate: certificate_url = get_certificate_url( user_id=user.id, course_id=unicode(course_overview.id), ) status_dict.update({ 'show_cert_web_view': True, 'cert_web_view_url': u'{url}'.format(url=certificate_url) }) else: # don't show download certificate button if we don't have an active certificate for course status_dict['show_download_url'] = False elif 'download_url' not in cert_status: log.warning( u"User %s has a downloadable cert for %s, but no download url", user.username, course_overview.id ) return default_info else: status_dict['download_url'] = cert_status['download_url'] # If enabled, show the LinkedIn "add to profile" button # Clicking this button sends the user to LinkedIn where they # can add the certificate information to their profile. linkedin_config = LinkedInAddToProfileConfiguration.current() if linkedin_config.enabled: status_dict['linked_in_url'] = linkedin_config.add_to_profile_url( course_overview.id, course_overview.display_name, cert_status.get('mode'), cert_status['download_url'] ) if status in ('generating', 'ready', 'notpassing', 'restricted'): if 'grade' not in cert_status: # Note: as of 11/20/2012, we know there are students in this state-- cs169.1x, # who need to be regraded (we weren't tracking 'notpassing' at first). # We can add a log.warning here once we think it shouldn't happen. return default_info else: status_dict['grade'] = cert_status['grade'] return status_dict @ensure_csrf_cookie def signin_user(request): """Deprecated. 
To be replaced by :class:`student_account.views.login_and_registration_form`.""" external_auth_response = external_auth_login(request) if external_auth_response is not None: return external_auth_response # Determine the URL to redirect to following login: redirect_to = get_next_url_for_login_page(request) if request.user.is_authenticated(): return redirect(redirect_to) third_party_auth_error = None for msg in messages.get_messages(request): if msg.extra_tags.split()[0] == "social-auth": # msg may or may not be translated. Try translating [again] in case we are able to: third_party_auth_error = _(unicode(msg)) # pylint: disable=translation-of-non-string break context = { 'login_redirect_url': redirect_to, # This gets added to the query string of the "Sign In" button in the header # Bool injected into JS to submit form if we're inside a running third- # party auth pipeline; distinct from the actual instance of the running # pipeline, if any. 'pipeline_running': 'true' if pipeline.running(request) else 'false', 'pipeline_url': auth_pipeline_urls(pipeline.AUTH_ENTRY_LOGIN, redirect_url=redirect_to), 'platform_name': microsite.get_value( 'platform_name', settings.PLATFORM_NAME ), 'third_party_auth_error': third_party_auth_error } return render_to_response('login.html', context) @ensure_csrf_cookie def register_user(request, extra_context=None): """Deprecated. To be replaced by :class:`student_account.views.login_and_registration_form`.""" # Determine the URL to redirect to following login: redirect_to = get_next_url_for_login_page(request) if request.user.is_authenticated(): return redirect(redirect_to) external_auth_response = external_auth_register(request) if external_auth_response is not None: return external_auth_response context = { 'login_redirect_url': redirect_to, # This gets added to the query string of the "Sign In" button in the header 'email': '', 'name': '', 'running_pipeline': None, 'pipeline_urls': auth_pipeline_urls(pipeline.AUTH_ENTRY_REGISTER, redirect_url=redirect_to), 'platform_name': microsite.get_value( 'platform_name', settings.PLATFORM_NAME ), 'selected_provider': '', 'username': '', } if extra_context is not None: context.update(extra_context) if context.get("extauth_domain", '').startswith(external_auth.views.SHIBBOLETH_DOMAIN_PREFIX): return render_to_response('register-shib.html', context) # If third-party auth is enabled, prepopulate the form with data from the # selected provider. 
if third_party_auth.is_enabled() and pipeline.running(request): running_pipeline = pipeline.get(request) current_provider = provider.Registry.get_from_pipeline(running_pipeline) overrides = current_provider.get_register_form_data(running_pipeline.get('kwargs')) overrides['running_pipeline'] = running_pipeline overrides['selected_provider'] = current_provider.name context.update(overrides) return render_to_response('register.html', context) def complete_course_mode_info(course_id, enrollment, modes=None): """ We would like to compute some more information from the given course modes and the user's current enrollment Returns the given information: - whether to show the course upsell information - numbers of days until they can't upsell anymore """ if modes is None: modes = CourseMode.modes_for_course_dict(course_id) mode_info = {'show_upsell': False, 'days_for_upsell': None} # we want to know if the user is already verified and if verified is an # option if 'verified' in modes and enrollment.mode != 'verified': mode_info['show_upsell'] = True # if there is an expiration date, find out how long from now it is if modes['verified'].expiration_datetime: today = datetime.datetime.now(UTC).date() mode_info['days_for_upsell'] = (modes['verified'].expiration_datetime.date() - today).days return mode_info def is_course_blocked(request, redeemed_registration_codes, course_key): """Checking either registration is blocked or not .""" blocked = False for redeemed_registration in redeemed_registration_codes: # registration codes may be generated via Bulk Purchase Scenario # we have to check only for the invoice generated registration codes # that their invoice is valid or not if redeemed_registration.invoice_item: if not getattr(redeemed_registration.invoice_item.invoice, 'is_valid'): blocked = True # disabling email notifications for unpaid registration courses Optout.objects.get_or_create(user=request.user, course_id=course_key) log.info( u"User %s (%s) opted out of receiving emails from course %s", request.user.username, request.user.email, course_key ) track.views.server_track(request, "change-email1-settings", {"receive_emails": "no", "course": course_key.to_deprecated_string()}, page='dashboard') break return blocked @login_required @ensure_csrf_cookie def dashboard(request): user = request.user platform_name = microsite.get_value("platform_name", settings.PLATFORM_NAME) # for microsites, we want to filter and only show enrollments for courses within # the microsites 'ORG' course_org_filter = microsite.get_value('course_org_filter') # Let's filter out any courses in an "org" that has been declared to be # in a Microsite org_filter_out_set = microsite.get_all_orgs() # remove our current Microsite from the "filter out" list, if applicable if course_org_filter: org_filter_out_set.remove(course_org_filter) # Build our (course, enrollment) list for the user, but ignore any courses that no # longer exist (because the course IDs have changed). Still, we don't delete those # enrollments, because it could have been a data push snafu. 
course_enrollments = list(get_course_enrollments(user, course_org_filter, org_filter_out_set)) # sort the enrollment pairs by the enrollment date course_enrollments.sort(key=lambda x: x.created, reverse=True) # Retrieve the course modes for each course enrolled_course_ids = [enrollment.course_id for enrollment in course_enrollments] __, unexpired_course_modes = CourseMode.all_and_unexpired_modes_for_courses(enrolled_course_ids) course_modes_by_course = { course_id: { mode.slug: mode for mode in modes } for course_id, modes in unexpired_course_modes.iteritems() } # Check to see if the student has recently enrolled in a course. # If so, display a notification message confirming the enrollment. enrollment_message = _create_recent_enrollment_message( course_enrollments, course_modes_by_course ) course_optouts = Optout.objects.filter(user=user).values_list('course_id', flat=True) message = "" if not user.is_active: message = render_to_string( 'registration/activate_account_notice.html', {'email': user.email, 'platform_name': platform_name} ) # Global staff can see what courses errored on their dashboard staff_access = False errored_courses = {} if has_access(user, 'staff', 'global'): # Show any courses that errored on load staff_access = True errored_courses = modulestore().get_errored_courses() show_courseware_links_for = frozenset( enrollment.course_id for enrollment in course_enrollments if has_access(request.user, 'load', enrollment.course_overview) and has_access(request.user, 'view_courseware_with_prerequisites', enrollment.course_overview) ) # Construct a dictionary of course mode information # used to render the course list. We re-use the course modes dict # we loaded earlier to avoid hitting the database. course_mode_info = { enrollment.course_id: complete_course_mode_info( enrollment.course_id, enrollment, modes=course_modes_by_course[enrollment.course_id] ) for enrollment in course_enrollments } # Determine the per-course verification status # This is a dictionary in which the keys are course locators # and the values are one of: # # VERIFY_STATUS_NEED_TO_VERIFY # VERIFY_STATUS_SUBMITTED # VERIFY_STATUS_APPROVED # VERIFY_STATUS_MISSED_DEADLINE # # Each of which correspond to a particular message to display # next to the course on the dashboard. # # If a course is not included in this dictionary, # there is no verification messaging to display. 
verify_status_by_course = check_verify_status_by_course(user, course_enrollments) cert_statuses = { enrollment.course_id: cert_info(request.user, enrollment.course_overview, enrollment.mode) for enrollment in course_enrollments } # only show email settings for Mongo course and when bulk email is turned on show_email_settings_for = frozenset( enrollment.course_id for enrollment in course_enrollments if ( settings.FEATURES['ENABLE_INSTRUCTOR_EMAIL'] and modulestore().get_modulestore_type(enrollment.course_id) != ModuleStoreEnum.Type.xml and CourseAuthorization.instructor_email_enabled(enrollment.course_id) ) ) # Verification Attempts # Used to generate the "you must reverify for course x" banner verification_status, verification_msg = SoftwareSecurePhotoVerification.user_status(user) # Gets data for midcourse reverifications, if any are necessary or have failed statuses = ["approved", "denied", "pending", "must_reverify"] reverifications = reverification_info(statuses) show_refund_option_for = frozenset( enrollment.course_id for enrollment in course_enrollments if enrollment.refundable() ) block_courses = frozenset( enrollment.course_id for enrollment in course_enrollments if is_course_blocked( request, CourseRegistrationCode.objects.filter( course_id=enrollment.course_id, registrationcoderedemption__redeemed_by=request.user ), enrollment.course_id ) ) enrolled_courses_either_paid = frozenset( enrollment.course_id for enrollment in course_enrollments if enrollment.is_paid_course() ) # If there are *any* denied reverifications that have not been toggled off, # we'll display the banner denied_banner = any(item.display for item in reverifications["denied"]) # Populate the Order History for the side-bar. order_history_list = order_history(user, course_org_filter=course_org_filter, org_filter_out_set=org_filter_out_set) # get list of courses having pre-requisites yet to be completed courses_having_prerequisites = frozenset( enrollment.course_id for enrollment in course_enrollments if enrollment.course_overview.pre_requisite_courses ) courses_requirements_not_met = get_pre_requisite_courses_not_completed(user, courses_having_prerequisites) if 'notlive' in request.GET: redirect_message = _("The course you are looking for does not start until {date}.").format( date=request.GET['notlive'] ) else: redirect_message = '' context = { 'enrollment_message': enrollment_message, 'redirect_message': redirect_message, 'course_enrollments': course_enrollments, 'course_optouts': course_optouts, 'message': message, 'staff_access': staff_access, 'errored_courses': errored_courses, 'show_courseware_links_for': show_courseware_links_for, 'all_course_modes': course_mode_info, 'cert_statuses': cert_statuses, 'credit_statuses': _credit_statuses(user, course_enrollments), 'show_email_settings_for': show_email_settings_for, 'reverifications': reverifications, 'verification_status': verification_status, 'verification_status_by_course': verify_status_by_course, 'verification_msg': verification_msg, 'show_refund_option_for': show_refund_option_for, 'block_courses': block_courses, 'denied_banner': denied_banner, 'billing_email': settings.PAYMENT_SUPPORT_EMAIL, 'user': user, 'logout_url': reverse(logout_user), 'platform_name': platform_name, 'enrolled_courses_either_paid': enrolled_courses_either_paid, 'provider_states': [], 'order_history_list': order_history_list, 'courses_requirements_not_met': courses_requirements_not_met, 'nav_hidden': True, } return render_to_response('dashboard.html', context) def 
_create_recent_enrollment_message(course_enrollments, course_modes): # pylint: disable=invalid-name """ Builds a recent course enrollment message. Constructs a new message template based on any recent course enrollments for the student. Args: course_enrollments (list[CourseEnrollment]): a list of course enrollments. course_modes (dict): Mapping of course ID's to course mode dictionaries. Returns: A string representing the HTML message output from the message template. None if there are no recently enrolled courses. """ recently_enrolled_courses = _get_recently_enrolled_courses(course_enrollments) if recently_enrolled_courses: messages = [ { "course_id": enrollment.course_overview.id, "course_name": enrollment.course_overview.display_name, "allow_donation": _allow_donation(course_modes, enrollment.course_overview.id, enrollment) } for enrollment in recently_enrolled_courses ] platform_name = microsite.get_value('platform_name', settings.PLATFORM_NAME) return render_to_string( 'enrollment/course_enrollment_message.html', {'course_enrollment_messages': messages, 'platform_name': platform_name} ) def _get_recently_enrolled_courses(course_enrollments): """ Given a list of enrollments, filter out all but recent enrollments. Args: course_enrollments (list[CourseEnrollment]): A list of course enrollments. Returns: list[CourseEnrollment]: A list of recent course enrollments. """ seconds = DashboardConfiguration.current().recent_enrollment_time_delta time_delta = (datetime.datetime.now(UTC) - datetime.timedelta(seconds=seconds)) return [ enrollment for enrollment in course_enrollments # If the enrollment has no created date, we are explicitly excluding the course # from the list of recent enrollments. if enrollment.is_active and enrollment.created > time_delta ] def _allow_donation(course_modes, course_id, enrollment): """Determines if the dashboard will request donations for the given course. Check if donations are configured for the platform, and if the current course is accepting donations. Args: course_modes (dict): Mapping of course ID's to course mode dictionaries. course_id (str): The unique identifier for the course. enrollment(CourseEnrollment): The enrollment object in which the user is enrolled Returns: True if the course is allowing donations. """ donations_enabled = DonationConfiguration.current().enabled return donations_enabled and enrollment.mode in course_modes[course_id] and course_modes[course_id][enrollment.mode].min_price == 0 def _update_email_opt_in(request, org): """Helper function used to hit the profile API if email opt-in is enabled.""" email_opt_in = request.POST.get('email_opt_in') if email_opt_in is not None: email_opt_in_boolean = email_opt_in == 'true' preferences_api.update_email_opt_in(request.user, org, email_opt_in_boolean) def _credit_statuses(user, course_enrollments): """ Retrieve the status for credit courses. A credit course is a course for which a user can purchased college credit. The current flow is: 1. User becomes eligible for credit (submits verifications, passes the course, etc.) 2. User purchases credit from a particular credit provider. 3. User requests credit from the provider, usually creating an account on the provider's site. 4. The credit provider notifies us whether the user's request for credit has been accepted or rejected. The dashboard is responsible for communicating the user's state in this flow. Arguments: user (User): The currently logged-in user. course_enrollments (list[CourseEnrollment]): List of enrollments for the user. 
Returns: dict The returned dictionary has keys that are `CourseKey`s and values that are dictionaries with: * eligible (bool): True if the user is eligible for credit in this course. * deadline (datetime): The deadline for purchasing and requesting credit for this course. * purchased (bool): Whether the user has purchased credit for this course. * provider_name (string): The display name of the credit provider. * provider_status_url (string): A URL the user can visit to check on their credit request status. * request_status (string): Either "pending", "approved", or "rejected" * error (bool): If true, an unexpected error occurred when retrieving the credit status, so the user should contact the support team. Example: >>> _credit_statuses(user, course_enrollments) { CourseKey.from_string("edX/DemoX/Demo_Course"): { "course_key": "edX/DemoX/Demo_Course", "eligible": True, "deadline": 2015-11-23 00:00:00 UTC, "purchased": True, "provider_name": "Hogwarts", "provider_status_url": "http://example.com/status", "request_status": "pending", "error": False } } """ from openedx.core.djangoapps.credit import api as credit_api # Feature flag off if not settings.FEATURES.get("ENABLE_CREDIT_ELIGIBILITY"): return {} request_status_by_course = { request["course_key"]: request["status"] for request in credit_api.get_credit_requests_for_user(user.username) } credit_enrollments = { enrollment.course_id: enrollment for enrollment in course_enrollments if enrollment.mode == "credit" } # When a user purchases credit in a course, the user's enrollment # mode is set to "credit" and an enrollment attribute is set # with the ID of the credit provider. We retrieve *all* such attributes # here to minimize the number of database queries. purchased_credit_providers = { attribute.enrollment.course_id: attribute.value for attribute in CourseEnrollmentAttribute.objects.filter( namespace="credit", name="provider_id", enrollment__in=credit_enrollments.values() ).select_related("enrollment") } provider_info_by_id = { provider["id"]: provider for provider in credit_api.get_credit_providers() } statuses = {} for eligibility in credit_api.get_eligibilities_for_user(user.username): course_key = CourseKey.from_string(unicode(eligibility["course_key"])) status = { "course_key": unicode(course_key), "eligible": True, "deadline": eligibility["deadline"], "purchased": course_key in credit_enrollments, "provider_name": None, "provider_status_url": None, "provider_id": None, "request_status": request_status_by_course.get(course_key), "error": False, } # If the user has purchased credit, then include information about the credit # provider from which the user purchased credit. # We retrieve the provider's ID from the an "enrollment attribute" set on the user's # enrollment when the user's order for credit is fulfilled by the E-Commerce service. if status["purchased"]: provider_id = purchased_credit_providers.get(course_key) if provider_id is None: status["error"] = True log.error( u"Could not find credit provider associated with credit enrollment " u"for user %s in course %s. The user will not be able to see his or her " u"credit request status on the student dashboard. 
This attribute should " u"have been set when the user purchased credit in the course.", user.id, course_key ) else: provider_info = provider_info_by_id.get(provider_id, {}) status["provider_name"] = provider_info.get("display_name") status["provider_status_url"] = provider_info.get("status_url") status["provider_id"] = provider_id statuses[course_key] = status return statuses @require_POST @commit_on_success_with_read_committed def change_enrollment(request, check_access=True): """ Modify the enrollment status for the logged-in user. The request parameter must be a POST request (other methods return 405) that specifies course_id and enrollment_action parameters. If course_id or enrollment_action is not specified, if course_id is not valid, if enrollment_action is something other than "enroll" or "unenroll", if enrollment_action is "enroll" and enrollment is closed for the course, or if enrollment_action is "unenroll" and the user is not enrolled in the course, a 400 error will be returned. If the user is not logged in, 403 will be returned; it is important that only this case return 403 so the front end can redirect the user to a registration or login page when this happens. This function should only be called from an AJAX request, so the error messages in the responses should never actually be user-visible. Args: request (`Request`): The Django request object Keyword Args: check_access (boolean): If True, we check that an accessible course actually exists for the given course_key before we enroll the student. The default is set to False to avoid breaking legacy code or code with non-standard flows (ex. beta tester invitations), but for any standard enrollment flow you probably want this to be True. Returns: Response """ # Get the user user = request.user # Ensure the user is authenticated if not user.is_authenticated(): return HttpResponseForbidden() # Ensure we received a course_id action = request.POST.get("enrollment_action") if 'course_id' not in request.POST: return HttpResponseBadRequest(_("Course id not specified")) try: course_id = SlashSeparatedCourseKey.from_deprecated_string(request.POST.get("course_id")) except InvalidKeyError: log.warning( u"User %s tried to %s with invalid course id: %s", user.username, action, request.POST.get("course_id"), ) return HttpResponseBadRequest(_("Invalid course id")) if action == "enroll": # Make sure the course exists # We don't do this check on unenroll, or a bad course id can't be unenrolled from if not modulestore().has_course(course_id): log.warning( u"User %s tried to enroll in non-existent course %s", user.username, course_id ) return HttpResponseBadRequest(_("Course id is invalid")) # Record the user's email opt-in preference if settings.FEATURES.get('ENABLE_MKTG_EMAIL_OPT_IN'): _update_email_opt_in(request, course_id.org) available_modes = CourseMode.modes_for_course_dict(course_id) # Check whether the user is blocked from enrolling in this course # This can occur if the user's IP is on a global blacklist # or if the user is enrolling in a country in which the course # is not available. 
redirect_url = embargo_api.redirect_if_blocked( course_id, user=user, ip_address=get_ip(request), url=request.path ) if redirect_url: return HttpResponse(redirect_url) # Check that auto enrollment is allowed for this course # (= the course is NOT behind a paywall) if CourseMode.can_auto_enroll(course_id): # Enroll the user using the default mode (honor) # We're assuming that users of the course enrollment table # will NOT try to look up the course enrollment model # by its slug. If they do, it's possible (based on the state of the database) # for no such model to exist, even though we've set the enrollment type # to "honor". try: CourseEnrollment.enroll(user, course_id, check_access=check_access) except Exception: return HttpResponseBadRequest(_("Could not enroll")) # If we have more than one course mode or professional ed is enabled, # then send the user to the choose your track page. # (In the case of no-id-professional/professional ed, this will redirect to a page that # funnels users directly into the verification / payment flow) if CourseMode.has_verified_mode(available_modes) or CourseMode.has_professional_mode(available_modes): return HttpResponse( reverse("course_modes_choose", kwargs={'course_id': unicode(course_id)}) ) # Otherwise, there is only one mode available (the default) return HttpResponse() elif action == "unenroll": if not CourseEnrollment.is_enrolled(user, course_id): return HttpResponseBadRequest(_("You are not enrolled in this course")) CourseEnrollment.unenroll(user, course_id) return HttpResponse() else: return HttpResponseBadRequest(_("Enrollment action is invalid")) # Need different levels of logging @ensure_csrf_cookie def login_user(request, error=""): # pylint: disable=too-many-statements,unused-argument """AJAX request to log in the user.""" backend_name = None email = None password = None redirect_url = None response = None running_pipeline = None third_party_auth_requested = third_party_auth.is_enabled() and pipeline.running(request) third_party_auth_successful = False trumped_by_first_party_auth = bool(request.POST.get('email')) or bool(request.POST.get('password')) user = None if third_party_auth_requested and not trumped_by_first_party_auth: # The user has already authenticated via third-party auth and has not # asked to do first party auth by supplying a username or password. We # now want to put them through the same logging and cookie calculation # logic as with first-party auth. 
running_pipeline = pipeline.get(request) username = running_pipeline['kwargs'].get('username') backend_name = running_pipeline['backend'] third_party_uid = running_pipeline['kwargs']['uid'] requested_provider = provider.Registry.get_from_pipeline(running_pipeline) try: user = pipeline.get_authenticated_user(requested_provider, username, third_party_uid) third_party_auth_successful = True except User.DoesNotExist: AUDIT_LOG.warning( u'Login failed - user with username {username} has no social auth with backend_name {backend_name}'.format( username=username, backend_name=backend_name)) return HttpResponse( _("You've successfully logged into your {provider_name} account, but this account isn't linked with an {platform_name} account yet.").format( platform_name=settings.PLATFORM_NAME, provider_name=requested_provider.name ) + "<br/><br/>" + _("Use your {platform_name} username and password to log into {platform_name} below, " "and then link your {platform_name} account with {provider_name} from your dashboard.").format( platform_name=settings.PLATFORM_NAME, provider_name=requested_provider.name ) + "<br/><br/>" + _("If you don't have an {platform_name} account yet, " "click <strong>Register</strong> at the top of the page.").format( platform_name=settings.PLATFORM_NAME), content_type="text/plain", status=403 ) else: if 'email' not in request.POST or 'password' not in request.POST: return JsonResponse({ "success": False, "value": _('There was an error receiving your login information. Please email us.'), # TODO: User error message }) # TODO: this should be status code 400 # pylint: disable=fixme email = request.POST['email'] password = request.POST['password'] try: user = User.objects.get(email=email) except User.DoesNotExist: if settings.FEATURES['SQUELCH_PII_IN_LOGS']: AUDIT_LOG.warning(u"Login failed - Unknown user email") else: AUDIT_LOG.warning(u"Login failed - Unknown user email: {0}".format(email)) # check if the user has a linked shibboleth account, if so, redirect the user to shib-login # This behavior is pretty much like what gmail does for shibboleth. Try entering some @stanford.edu # address into the Gmail login. if settings.FEATURES.get('AUTH_USE_SHIB') and user: try: eamap = ExternalAuthMap.objects.get(user=user) if eamap.external_domain.startswith(external_auth.views.SHIBBOLETH_DOMAIN_PREFIX): return JsonResponse({ "success": False, "redirect": reverse('shib-login'), }) # TODO: this should be status code 301 # pylint: disable=fixme except ExternalAuthMap.DoesNotExist: # This is actually the common case, logging in user without external linked login AUDIT_LOG.info(u"User %s w/o external auth attempting login", user) # see if account has been locked out due to excessive login failures user_found_by_email_lookup = user if user_found_by_email_lookup and LoginFailures.is_feature_enabled(): if LoginFailures.is_user_locked_out(user_found_by_email_lookup): return JsonResponse({ "success": False, "value": _('This account has been temporarily locked due to excessive login failures. Try again later.'), }) # TODO: this should be status code 429 # pylint: disable=fixme # see if the user must reset his/her password due to any policy settings if user_found_by_email_lookup and PasswordHistory.should_user_reset_password_now(user_found_by_email_lookup): return JsonResponse({ "success": False, "value": _('Your password has expired due to password policy on this account. You must ' 'reset your password before you can log in again. 
Please click the ' '"Forgot Password" link on this page to reset your password before logging in again.'), }) # TODO: this should be status code 403 # pylint: disable=fixme # if the user doesn't exist, we want to set the username to an invalid # username so that authentication is guaranteed to fail and we can take # advantage of the ratelimited backend username = user.username if user else "" if not third_party_auth_successful: try: user = authenticate(username=username, password=password, request=request) # this occurs when there are too many attempts from the same IP address except RateLimitException: return JsonResponse({ "success": False, "value": _('Too many failed login attempts. Try again later.'), }) # TODO: this should be status code 429 # pylint: disable=fixme if user is None: # tick the failed login counters if the user exists in the database if user_found_by_email_lookup and LoginFailures.is_feature_enabled(): LoginFailures.increment_lockout_counter(user_found_by_email_lookup) # if we didn't find this username earlier, the account for this email # doesn't exist, and doesn't have a corresponding password if username != "": if settings.FEATURES['SQUELCH_PII_IN_LOGS']: loggable_id = user_found_by_email_lookup.id if user_found_by_email_lookup else "<unknown>" AUDIT_LOG.warning(u"Login failed - password for user.id: {0} is invalid".format(loggable_id)) else: AUDIT_LOG.warning(u"Login failed - password for {0} is invalid".format(email)) return JsonResponse({ "success": False, "value": _('Email or password is incorrect.'), }) # TODO: this should be status code 400 # pylint: disable=fixme # successful login, clear failed login attempts counters, if applicable if LoginFailures.is_feature_enabled(): LoginFailures.clear_lockout_counter(user) # Track the user's sign in if settings.FEATURES.get('SEGMENT_IO_LMS') and hasattr(settings, 'SEGMENT_IO_LMS_KEY'): tracking_context = tracker.get_tracker().resolve_context() analytics.identify(user.id, { 'email': email, 'username': username, }) analytics.track( user.id, "edx.bi.user.account.authenticated", { 'category': "conversion", 'label': request.POST.get('course_id'), 'provider': None }, context={ 'Google Analytics': { 'clientId': tracking_context.get('client_id') } } ) if user is not None and user.is_active: try: # We do not log here, because we have a handler registered # to perform logging on successful logins. login(request, user) if request.POST.get('remember') == 'true': request.session.set_expiry(604800) log.debug("Setting user session to never expire") else: request.session.set_expiry(0) except Exception as exc: # pylint: disable=broad-except AUDIT_LOG.critical("Login failed - Could not create session. Is memcached running?") log.critical("Login failed - Could not create session. Is memcached running?") log.exception(exc) raise redirect_url = None # The AJAX method calling should know the default destination upon success if third_party_auth_successful: redirect_url = pipeline.get_complete_url(backend_name) response = JsonResponse({ "success": True, "redirect_url": redirect_url, }) # Ensure that the external marketing site can # detect that the user is logged in. 
return set_logged_in_cookies(request, response, user) if settings.FEATURES['SQUELCH_PII_IN_LOGS']: AUDIT_LOG.warning(u"Login failed - Account not active for user.id: {0}, resending activation".format(user.id)) else: AUDIT_LOG.warning(u"Login failed - Account not active for user {0}, resending activation".format(username)) reactivation_email_for_user(user) not_activated_msg = _("This account has not been activated. We have sent another activation message. Please check your email for the activation instructions.") return JsonResponse({ "success": False, "value": not_activated_msg, }) # TODO: this should be status code 400 # pylint: disable=fixme @csrf_exempt @require_POST @social_utils.strategy("social:complete") def login_oauth_token(request, backend): """ Authenticate the client using an OAuth access token by using the token to retrieve information from a third party and matching that information to an existing user. """ warnings.warn("Please use AccessTokenExchangeView instead.", DeprecationWarning) backend = request.backend if isinstance(backend, social_oauth.BaseOAuth1) or isinstance(backend, social_oauth.BaseOAuth2): if "access_token" in request.POST: # Tell third party auth pipeline that this is an API call request.session[pipeline.AUTH_ENTRY_KEY] = pipeline.AUTH_ENTRY_LOGIN_API user = None try: user = backend.do_auth(request.POST["access_token"]) except (HTTPError, AuthException): pass # do_auth can return a non-User object if it fails if user and isinstance(user, User): login(request, user) return JsonResponse(status=204) else: # Ensure user does not re-enter the pipeline request.social_strategy.clean_partial_pipeline() return JsonResponse({"error": "invalid_token"}, status=401) else: return JsonResponse({"error": "invalid_request"}, status=400) raise Http404 @ensure_csrf_cookie def logout_user(request): """ HTTP request to log out the user. Redirects to marketing page. Deletes both the CSRF and sessionid cookies so the marketing site can determine the logged in state of the user """ # We do not log here, because we have a handler registered # to perform logging on successful logouts. logout(request) if settings.FEATURES.get('AUTH_USE_CAS'): target = reverse('cas-logout') else: target = '/' response = redirect(target) delete_logged_in_cookies(response) return response @require_GET @login_required @ensure_csrf_cookie def manage_user_standing(request): """ Renders the view used to manage user standing. Also displays a table of user accounts that have been disabled and who disabled them. """ if not request.user.is_staff: raise Http404 all_disabled_accounts = UserStanding.objects.filter( account_status=UserStanding.ACCOUNT_DISABLED ) all_disabled_users = [standing.user for standing in all_disabled_accounts] headers = ['username', 'account_changed_by'] rows = [] for user in all_disabled_users: row = [user.username, user.standing.all()[0].changed_by] rows.append(row) context = {'headers': headers, 'rows': rows} return render_to_response("manage_user_standing.html", context) @require_POST @login_required @ensure_csrf_cookie def disable_account_ajax(request): """ Ajax call to change user standing. 
Endpoint of the form in manage_user_standing.html """ if not request.user.is_staff: raise Http404 username = request.POST.get('username') context = {} if username is None or username.strip() == '': context['message'] = _('Please enter a username') return JsonResponse(context, status=400) account_action = request.POST.get('account_action') if account_action is None: context['message'] = _('Please choose an option') return JsonResponse(context, status=400) username = username.strip() try: user = User.objects.get(username=username) except User.DoesNotExist: context['message'] = _("User with username {} does not exist").format(username) return JsonResponse(context, status=400) else: user_account, _success = UserStanding.objects.get_or_create( user=user, defaults={'changed_by': request.user}, ) if account_action == 'disable': user_account.account_status = UserStanding.ACCOUNT_DISABLED context['message'] = _("Successfully disabled {}'s account").format(username) log.info(u"%s disabled %s's account", request.user, username) elif account_action == 'reenable': user_account.account_status = UserStanding.ACCOUNT_ENABLED context['message'] = _("Successfully reenabled {}'s account").format(username) log.info(u"%s reenabled %s's account", request.user, username) else: context['message'] = _("Unexpected account status") return JsonResponse(context, status=400) user_account.changed_by = request.user user_account.standing_last_changed_at = datetime.datetime.now(UTC) user_account.save() return JsonResponse(context) @login_required @ensure_csrf_cookie def change_setting(request): """JSON call to change a profile setting: Right now, location""" # TODO (vshnayder): location is no longer used u_prof = UserProfile.objects.get(user=request.user) # request.user.profile_cache if 'location' in request.POST: u_prof.location = request.POST['location'] u_prof.save() return JsonResponse({ "success": True, "location": u_prof.location, }) class AccountValidationError(Exception): def __init__(self, message, field): super(AccountValidationError, self).__init__(message) self.field = field @receiver(post_save, sender=User) def user_signup_handler(sender, **kwargs): # pylint: disable=unused-argument """ handler that saves the user Signup Source when the user is created """ if 'created' in kwargs and kwargs['created']: site = microsite.get_value('SITE_NAME') if site: user_signup_source = UserSignupSource(user=kwargs['instance'], site=site) user_signup_source.save() log.info(u'user {} originated from a white labeled "Microsite"'.format(kwargs['instance'].id)) def _do_create_account(form): """ Given cleaned post variables, create the User and UserProfile objects, as well as the registration for this user. Returns a tuple (User, UserProfile, Registration). Note: this function is also used for creating test users. """ if not form.is_valid(): raise ValidationError(form.errors) user = User( username=form.cleaned_data["username"], email=form.cleaned_data["email"], is_active=False ) user.set_password(form.cleaned_data["password"]) registration = Registration() # TODO: Rearrange so that if part of the process fails, the whole process fails. # Right now, we can have e.g. 
no registration e-mail sent out and a zombie account try: user.save() except IntegrityError: # Figure out the cause of the integrity error if len(User.objects.filter(username=user.username)) > 0: raise AccountValidationError( _("An account with the Public Username '{username}' already exists.").format(username=user.username), field="username" ) elif len(User.objects.filter(email=user.email)) > 0: raise AccountValidationError( _("An account with the Email '{email}' already exists.").format(email=user.email), field="email" ) else: raise # add this account creation to password history # NOTE, this will be a NOP unless the feature has been turned on in configuration password_history_entry = PasswordHistory() password_history_entry.create(user) registration.register(user) profile_fields = [ "name", "level_of_education", "gender", "mailing_address", "city", "country", "goals", "year_of_birth" ] profile = UserProfile( user=user, **{key: form.cleaned_data.get(key) for key in profile_fields} ) extended_profile = form.cleaned_extended_profile if extended_profile: profile.meta = json.dumps(extended_profile) try: profile.save() except Exception: # pylint: disable=broad-except log.exception("UserProfile creation failed for user {id}.".format(id=user.id)) raise return (user, profile, registration) def create_account_with_params(request, params): """ Given a request and a dict of parameters (which may or may not have come from the request), create an account for the requesting user, including creating a comments service user object and sending an activation email. This also takes external/third-party auth into account, updates that as necessary, and authenticates the user for the request's session. Does not return anything. Raises AccountValidationError if an account with the username or email specified by params already exists, or ValidationError if any of the given parameters is invalid for any other reason. Issues with this code: * It is not transactional. If there is a failure part-way, an incomplete account will be created and left in the database. * Third-party auth passwords are not verified. There is a comment that they are unused, but it would be helpful to have a sanity check that they are sane. * It is over 300 lines long (!) and includes disprate functionality, from registration e-mails to all sorts of other things. It should be broken up into semantically meaningful functions. * The user-facing text is rather unfriendly (e.g. "Username must be a minimum of two characters long" rather than "Please use a username of at least two characters"). """ # Copy params so we can modify it; we can't just do dict(params) because if # params is request.POST, that results in a dict containing lists of values params = dict(params.items()) # allow for microsites to define their own set of required/optional/hidden fields extra_fields = microsite.get_value( 'REGISTRATION_EXTRA_FIELDS', getattr(settings, 'REGISTRATION_EXTRA_FIELDS', {}) ) # Boolean of whether a 3rd party auth provider and credentials were provided in # the API so the newly created account can link with the 3rd party account. # # Note: this is orthogonal to the 3rd party authentication pipeline that occurs # when the account is created via the browser and redirect URLs. 
    should_link_with_social_auth = third_party_auth.is_enabled() and 'provider' in params

    if should_link_with_social_auth or (third_party_auth.is_enabled() and pipeline.running(request)):
        params["password"] = pipeline.make_random_password()

    # if doing signup for an external authorization, then get email, password, name from the eamap
    # don't use the ones from the form, since the user could have hacked those
    # unless originally we didn't get a valid email or name from the external auth
    # TODO: We do not check whether these values meet all necessary criteria, such as email length
    do_external_auth = 'ExternalAuthMap' in request.session
    if do_external_auth:
        eamap = request.session['ExternalAuthMap']
        try:
            validate_email(eamap.external_email)
            params["email"] = eamap.external_email
        except ValidationError:
            pass
        if eamap.external_name.strip() != '':
            params["name"] = eamap.external_name
        params["password"] = eamap.internal_password
        log.debug(u'In create_account with external_auth: user = %s, email=%s', params["name"], params["email"])

    extended_profile_fields = microsite.get_value('extended_profile_fields', [])
    enforce_password_policy = (
        settings.FEATURES.get("ENFORCE_PASSWORD_POLICY", False) and not do_external_auth
    )
    # Can't have terms of service for certain SHIB users, like at Stanford
    tos_required = (
        not settings.FEATURES.get("AUTH_USE_SHIB") or
        not settings.FEATURES.get("SHIB_DISABLE_TOS") or
        not do_external_auth or
        not eamap.external_domain.startswith(
            external_auth.views.SHIBBOLETH_DOMAIN_PREFIX
        )
    )

    form = AccountCreationForm(
        data=params,
        extra_fields=extra_fields,
        extended_profile_fields=extended_profile_fields,
        enforce_username_neq_password=True,
        enforce_password_policy=enforce_password_policy,
        tos_required=tos_required,
    )

    # Perform operations within a transaction that are critical to account creation
    with transaction.commit_on_success():
        # first, create the account
        (user, profile, registration) = _do_create_account(form)

        # next, link the account with social auth, if provided via the API.
        # (If the user is using the normal register page, the social auth pipeline does the linking, not this code)
        if should_link_with_social_auth:
            backend_name = params['provider']
            request.social_strategy = social_utils.load_strategy(request)
            redirect_uri = reverse('social:complete', args=(backend_name, ))
            request.backend = social_utils.load_backend(request.social_strategy, backend_name, redirect_uri)
            social_access_token = params.get('access_token')
            if not social_access_token:
                raise ValidationError({
                    'access_token': [
                        _("An access_token is required when passing value ({}) for provider.").format(
                            params['provider']
                        )
                    ]
                })
            request.session[pipeline.AUTH_ENTRY_KEY] = pipeline.AUTH_ENTRY_REGISTER_API
            pipeline_user = None
            error_message = ""
            try:
                pipeline_user = request.backend.do_auth(social_access_token, user=user)
            except AuthAlreadyAssociated:
                error_message = _("The provided access_token is already associated with another user.")
            except (HTTPError, AuthException):
                error_message = _("The provided access_token is not valid.")
            if not pipeline_user or not isinstance(pipeline_user, User):
                # Ensure user does not re-enter the pipeline
                request.social_strategy.clean_partial_pipeline()
                raise ValidationError({'access_token': [error_message]})

    # Perform operations that are non-critical parts of account creation
    preferences_api.set_user_preference(user, LANGUAGE_KEY, get_language())

    if settings.FEATURES.get('ENABLE_DISCUSSION_EMAIL_DIGEST'):
        try:
            enable_notifications(user)
        except Exception:
            log.exception("Enable discussion notifications failed for user {id}.".format(id=user.id))

    dog_stats_api.increment("common.student.account_created")

    # If the user is registering via 3rd party auth, track which provider they use
    third_party_provider = None
    running_pipeline = None
    if third_party_auth.is_enabled() and pipeline.running(request):
        running_pipeline = pipeline.get(request)
        third_party_provider = provider.Registry.get_from_pipeline(running_pipeline)

    # Track the user's registration
    if settings.FEATURES.get('SEGMENT_IO_LMS') and hasattr(settings, 'SEGMENT_IO_LMS_KEY'):
        tracking_context = tracker.get_tracker().resolve_context()
        analytics.identify(user.id, {
            'email': user.email,
            'username': user.username,
        })

        analytics.track(
            user.id,
            "edx.bi.user.account.registered",
            {
                'category': 'conversion',
                'label': params.get('course_id'),
                'provider': third_party_provider.name if third_party_provider else None
            },
            context={
                'Google Analytics': {
                    'clientId': tracking_context.get('client_id')
                }
            }
        )

    create_comments_service_user(user)

    # Don't send email if we are:
    #
    # 1. Doing load testing.
    # 2. Random user generation for other forms of testing.
    # 3. External auth bypassing activation.
    # 4. Have the platform configured to not require e-mail activation.
    # 5. Registering a new user using a trusted third party provider (with skip_email_verification=True)
    #
    # Note that this feature is only tested as a flag set one way or
    # the other for *new* systems. We need to be careful about
    # changing settings on a running system to make sure no users are
    # left in an inconsistent state (or doing a migration if they are).
    send_email = (
        not settings.FEATURES.get('SKIP_EMAIL_VALIDATION', None) and
        not settings.FEATURES.get('AUTOMATIC_AUTH_FOR_TESTING') and
        not (do_external_auth and settings.FEATURES.get('BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH')) and
        not (
            third_party_provider and third_party_provider.skip_email_verification and
            user.email == running_pipeline['kwargs'].get('details', {}).get('email')
        )
    )
    if send_email:
        context = {
            'name': profile.name,
            'key': registration.activation_key,
        }
        # composes activation email
        subject = render_to_string('emails/activation_email_subject.txt', context)
        # Email subject *must not* contain newlines
        subject = ''.join(subject.splitlines())
        message = render_to_string('emails/activation_email.txt', context)
        from_address = microsite.get_value(
            'email_from_address',
            settings.DEFAULT_FROM_EMAIL
        )
        try:
            if settings.FEATURES.get('REROUTE_ACTIVATION_EMAIL'):
                dest_addr = settings.FEATURES['REROUTE_ACTIVATION_EMAIL']
                message = ("Activation for %s (%s): %s\n" % (user, user.email, profile.name) +
                           '-' * 80 + '\n\n' + message)
                mail.send_mail(subject, message, from_address, [dest_addr], fail_silently=False)
            else:
                user.email_user(subject, message, from_address)
        except Exception:  # pylint: disable=broad-except
            log.error(u'Unable to send activation email to user from "%s"', from_address, exc_info=True)
    else:
        registration.activate()

    # Immediately after a user creates an account, we log them in. They are only
    # logged in until they close the browser. They can't log in again until they click
    # the activation link from the email.
    new_user = authenticate(username=user.username, password=params['password'])
    login(request, new_user)
    request.session.set_expiry(0)

    # TODO: there is no error checking here to see that the user actually logged in successfully,
    # and is not yet an active user.
    if new_user is not None:
        AUDIT_LOG.info(u"Login success on new account creation - {0}".format(new_user.username))

    if do_external_auth:
        eamap.user = new_user
        eamap.dtsignup = datetime.datetime.now(UTC)
        eamap.save()
        AUDIT_LOG.info(u"User registered with external_auth %s", new_user.username)
        AUDIT_LOG.info(u'Updated ExternalAuthMap for %s to be %s', new_user.username, eamap)

        if settings.FEATURES.get('BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH'):
            log.info('bypassing activation email')
            new_user.is_active = True
            new_user.save()
            AUDIT_LOG.info(u"Login activated on extauth account - {0} ({1})".format(new_user.username, new_user.email))

    return new_user


@csrf_exempt
def create_account(request, post_override=None):
    """
    JSON call to create new edX account.
    Used by form in signup_modal.html, which is included into navigation.html
    """
    warnings.warn("Please use RegistrationView instead.", DeprecationWarning)

    try:
        user = create_account_with_params(request, post_override or request.POST)
    except AccountValidationError as exc:
        return JsonResponse({'success': False, 'value': exc.message, 'field': exc.field}, status=400)
    except ValidationError as exc:
        field, error_list = next(exc.message_dict.iteritems())
        return JsonResponse(
            {
                "success": False,
                "field": field,
                "value": error_list[0],
            },
            status=400
        )

    redirect_url = None  # The AJAX method calling should know the default destination upon success

    # Resume the third-party-auth pipeline if necessary.
    if third_party_auth.is_enabled() and pipeline.running(request):
        running_pipeline = pipeline.get(request)
        redirect_url = pipeline.get_complete_url(running_pipeline['backend'])

    response = JsonResponse({
        'success': True,
        'redirect_url': redirect_url,
    })
    set_logged_in_cookies(request, response, user)
    return response
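# ----------------------------------------------------------------------------------------------
# Editor-added illustrative sketch (not part of the original module): a minimal example of how
# the account-creation helpers above fit together when called from Python, mirroring what
# auto_auth() below does. All literal values are hypothetical, and the function is never called
# from this module, so importing the module is unaffected.
def _example_programmatic_account_creation():
    """Hedged sketch: create an account with AccountCreationForm + _do_create_account()."""
    form = AccountCreationForm(
        data={
            'username': 'jane_doe',              # hypothetical
            'email': 'jane@example.com',         # hypothetical
            'password': 'not-a-real-password',   # hypothetical
            'name': 'Jane Doe',                  # hypothetical
        },
        tos_required=False,
    )
    try:
        user, profile, registration = _do_create_account(form)
    except AccountValidationError as exc:
        # exc.field names the colliding field ('username' or 'email'), exactly as
        # create_account() above reports it back to the caller.
        return {'success': False, 'field': exc.field, 'value': exc.message}

    registration.activate()  # skip the activation e-mail, as auto_auth() does for test users
    return {'success': True, 'user_id': user.id, 'name': profile.name}
# ----------------------------------------------------------------------------------------------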
def auto_auth(request):
    """
    Create or configure a user account, then log in as that user.

    Enabled only when settings.FEATURES['AUTOMATIC_AUTH_FOR_TESTING'] is true.

    Accepts the following querystring parameters:
    * `username`, `email`, and `password` for the user account
    * `full_name` for the user profile (the user's full name; defaults to the username)
    * `staff`: Set to "true" to make the user global staff.
    * `course_id`: Enroll the student in the course with `course_id`
    * `roles`: Comma-separated list of roles to grant the student in the course with `course_id`
    * `no_login`: Define this to create the user but not log in

    If username, email, or password are not provided, use randomly generated credentials.
    """
    # Generate a unique name to use if none provided
    unique_name = uuid.uuid4().hex[0:30]

    # Use the params from the request, otherwise use these defaults
    username = request.GET.get('username', unique_name)
    password = request.GET.get('password', unique_name)
    email = request.GET.get('email', unique_name + "@example.com")
    full_name = request.GET.get('full_name', username)
    is_staff = request.GET.get('staff', None)
    course_id = request.GET.get('course_id', None)
    course_key = None
    if course_id:
        course_key = CourseLocator.from_string(course_id)
    role_names = [v.strip() for v in request.GET.get('roles', '').split(',') if v.strip()]
    login_when_done = 'no_login' not in request.GET

    form = AccountCreationForm(
        data={
            'username': username,
            'email': email,
            'password': password,
            'name': full_name,
        },
        tos_required=False
    )

    # Attempt to create the account.
    # If successful, this will return a tuple containing
    # the new user object.
    try:
        user, profile, reg = _do_create_account(form)
    except AccountValidationError:
        # Attempt to retrieve the existing user.
        user = User.objects.get(username=username)
        user.email = email
        user.set_password(password)
        user.save()
        profile = UserProfile.objects.get(user=user)
        reg = Registration.objects.get(user=user)

    # Set the user's global staff bit
    if is_staff is not None:
        user.is_staff = (is_staff == "true")
        user.save()

    # Activate the user
    reg.activate()
    reg.save()

    # ensure parental consent threshold is met
    year = datetime.date.today().year
    age_limit = settings.PARENTAL_CONSENT_AGE_LIMIT
    profile.year_of_birth = (year - age_limit) - 1
    profile.save()

    # Enroll the user in a course
    if course_key is not None:
        CourseEnrollment.enroll(user, course_key)

    # Apply the roles
    for role_name in role_names:
        role = Role.objects.get(name=role_name, course_id=course_key)
        user.roles.add(role)

    # Log in as the user
    if login_when_done:
        user = authenticate(username=username, password=password)
        login(request, user)

    create_comments_service_user(user)

    # Provide the user with a valid CSRF token
    # then return a 200 response
    if request.META.get('HTTP_ACCEPT') == 'application/json':
        response = JsonResponse({
            'created_status': u"Logged in" if login_when_done else "Created",
            'username': username,
            'email': email,
            'password': password,
            'user_id': user.id,  # pylint: disable=no-member
            'anonymous_id': anonymous_id_for_user(user, None),
        })
    else:
        success_msg = u"{} user {} ({}) with password {} and user_id {}".format(
            u"Logged in" if login_when_done else "Created",
            username, email, password, user.id  # pylint: disable=no-member
        )
        response = HttpResponse(success_msg)
    response.set_cookie('csrftoken', csrf(request)['csrf_token'])
    return response


@ensure_csrf_cookie
def activate_account(request, key):
    """When link in activation e-mail is clicked"""
    regs = Registration.objects.filter(activation_key=key)
    if len(regs) == 1:
        user_logged_in = request.user.is_authenticated()
        already_active = True
        if not regs[0].user.is_active:
            regs[0].activate()
            already_active = False

            # Enroll student in any pending courses he/she may have if auto_enroll flag is set
            student = User.objects.filter(id=regs[0].user_id)
            if student:
                ceas = CourseEnrollmentAllowed.objects.filter(email=student[0].email)
                for cea in ceas:
                    if cea.auto_enroll:
                        enrollment = CourseEnrollment.enroll(student[0], cea.course_id)
                        manual_enrollment_audit = ManualEnrollmentAudit.get_manual_enrollment_by_email(student[0].email)
                        if manual_enrollment_audit is not None:
                            # Get the enrolled-by user and reason from the ManualEnrollmentAudit table,
                            # then create a new ManualEnrollmentAudit entry for the same email with a
                            # different transition state.
                            ManualEnrollmentAudit.create_manual_enrollment_audit(
                                manual_enrollment_audit.enrolled_by, student[0].email,
                                ALLOWEDTOENROLL_TO_ENROLLED, manual_enrollment_audit.reason, enrollment
                            )

        resp = render_to_response(
            "registration/activation_complete.html",
            {
                'user_logged_in': user_logged_in,
                'already_active': already_active
            }
        )
        return resp
    if len(regs) == 0:
        return render_to_response(
            "registration/activation_invalid.html",
            {'csrf': csrf(request)['csrf_token']}
        )
    return HttpResponseServerError(_("Unknown error. Please e-mail us to let us know how it happened."))
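# ----------------------------------------------------------------------------------------------
# Editor-added illustrative sketch (not part of the original module): a hedged, test-style
# example of driving activate_account() above directly. The username and URL path are
# hypothetical, and a real deployment would resolve the path from its urlconf; the function
# is never called from this module.
def _example_activation_click():
    """Hedged sketch: simulate a user clicking the activation link from the e-mail."""
    from django.test import RequestFactory
    from django.contrib.auth.models import AnonymousUser

    reg = Registration.objects.get(user__username='jane_doe')           # hypothetical user
    request = RequestFactory().get('/activate/' + reg.activation_key)   # hypothetical path
    request.user = AnonymousUser()

    # activate_account() activates the registration and auto-enrolls any pending
    # CourseEnrollmentAllowed rows for the user's e-mail address.
    return activate_account(request, reg.activation_key)
# ----------------------------------------------------------------------------------------------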
""" # Add some rate limiting here by re-using the RateLimitMixin as a helper class limiter = BadRequestRateLimiter() if limiter.is_rate_limit_exceeded(request): AUDIT_LOG.warning("Rate limit exceeded in password_reset") return HttpResponseForbidden() form = PasswordResetFormNoActive(request.POST) if form.is_valid(): form.save(use_https=request.is_secure(), from_email=settings.DEFAULT_FROM_EMAIL, request=request, domain_override=request.get_host()) # When password change is complete, a "edx.user.settings.changed" event will be emitted. # But because changing the password is multi-step, we also emit an event here so that we can # track where the request was initiated. tracker.emit( SETTING_CHANGE_INITIATED, { "setting": "password", "old": None, "new": None, "user_id": request.user.id, } ) else: # bad user? tick the rate limiter counter AUDIT_LOG.info("Bad password_reset user passed in.") limiter.tick_bad_request_counter(request) return JsonResponse({ 'success': True, 'value': render_to_string('registration/password_reset_done.html', {}), }) def password_reset_confirm_wrapper( request, uidb36=None, token=None, ): """ A wrapper around django.contrib.auth.views.password_reset_confirm. Needed because we want to set the user as active at this step. """ # cribbed from django.contrib.auth.views.password_reset_confirm try: uid_int = base36_to_int(uidb36) user = User.objects.get(id=uid_int) user.is_active = True user.save() except (ValueError, User.DoesNotExist): pass # tie in password strength enforcement as an optional level of # security protection err_msg = None if request.method == 'POST': password = request.POST['new_password1'] if settings.FEATURES.get('ENFORCE_PASSWORD_POLICY', False): try: validate_password_length(password) validate_password_complexity(password) validate_password_dictionary(password) except ValidationError, err: err_msg = _('Password: ') + '; '.join(err.messages) # also, check the password reuse policy if not PasswordHistory.is_allowable_password_reuse(user, password): if user.is_staff: num_distinct = settings.ADVANCED_SECURITY_CONFIG['MIN_DIFFERENT_STAFF_PASSWORDS_BEFORE_REUSE'] else: num_distinct = settings.ADVANCED_SECURITY_CONFIG['MIN_DIFFERENT_STUDENT_PASSWORDS_BEFORE_REUSE'] err_msg = ungettext( "You are re-using a password that you have used recently. You must have {num} distinct password before reusing a previous password.", "You are re-using a password that you have used recently. You must have {num} distinct passwords before reusing a previous password.", num_distinct ).format(num=num_distinct) # also, check to see if passwords are getting reset too frequent if PasswordHistory.is_password_reset_too_soon(user): num_days = settings.ADVANCED_SECURITY_CONFIG['MIN_TIME_IN_DAYS_BETWEEN_ALLOWED_RESETS'] err_msg = ungettext( "You are resetting passwords too frequently. Due to security policies, {num} day must elapse between password resets.", "You are resetting passwords too frequently. 
                num_days
            ).format(num=num_days)

    if err_msg:
        # We have a password reset attempt which violates some security policy; use the
        # existing Django template to communicate this back to the user
        context = {
            'validlink': True,
            'form': None,
            'title': _('Password reset unsuccessful'),
            'err_msg': err_msg,
            'platform_name': settings.PLATFORM_NAME,
        }
        return TemplateResponse(request, 'registration/password_reset_confirm.html', context)
    else:
        # we also want to pass settings.PLATFORM_NAME in as extra_context
        extra_context = {"platform_name": settings.PLATFORM_NAME}

        if request.method == 'POST':
            # remember what the old password hash is before we call down
            old_password_hash = user.password

            result = password_reset_confirm(
                request, uidb36=uidb36, token=token, extra_context=extra_context
            )

            # get the updated user
            updated_user = User.objects.get(id=uid_int)

            # did the password hash change, if so record it in the PasswordHistory
            if updated_user.password != old_password_hash:
                entry = PasswordHistory()
                entry.create(updated_user)

            return result
        else:
            return password_reset_confirm(
                request, uidb36=uidb36, token=token, extra_context=extra_context
            )


def reactivation_email_for_user(user):
    """Send a fresh activation e-mail to the given (still inactive) user."""
    try:
        reg = Registration.objects.get(user=user)
    except Registration.DoesNotExist:
        return JsonResponse({
            "success": False,
            "error": _('No inactive user with this e-mail exists'),
        })  # TODO: this should be status code 400  # pylint: disable=fixme

    context = {
        'name': user.profile.name,
        'key': reg.activation_key,
    }

    subject = render_to_string('emails/activation_email_subject.txt', context)
    subject = ''.join(subject.splitlines())
    message = render_to_string('emails/activation_email.txt', context)

    try:
        user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL)
    except Exception:  # pylint: disable=broad-except
        log.error(u'Unable to send reactivation email from "%s"', settings.DEFAULT_FROM_EMAIL, exc_info=True)
        return JsonResponse({
            "success": False,
            "error": _('Unable to send reactivation email')
        })  # TODO: this should be status code 500  # pylint: disable=fixme

    return JsonResponse({"success": True})


def validate_new_email(user, new_email):
    """
    Given a new email for a user, does some basic verification of the new address.
    If any issues are encountered with verification, a ValueError will be thrown.
    """
    try:
        validate_email(new_email)
    except ValidationError:
        raise ValueError(_('Valid e-mail address required.'))

    if new_email == user.email:
        raise ValueError(_('Old email is the same as the new email.'))

    if User.objects.filter(email=new_email).count() != 0:
        raise ValueError(_('An account with this e-mail already exists.'))


def do_email_change_request(user, new_email, activation_key=None):
    """
    Given a new email for a user, does some basic verification of the new address and sends
    an activation message to the new address. If any issues are encountered with verification
    or sending the message, a ValueError will be thrown.
""" pec_list = PendingEmailChange.objects.filter(user=user) if len(pec_list) == 0: pec = PendingEmailChange() pec.user = user else: pec = pec_list[0] # if activation_key is not passing as an argument, generate a random key if not activation_key: activation_key = uuid.uuid4().hex pec.new_email = new_email pec.activation_key = activation_key pec.save() context = { 'key': pec.activation_key, 'old_email': user.email, 'new_email': pec.new_email } subject = render_to_string('emails/email_change_subject.txt', context) subject = ''.join(subject.splitlines()) message = render_to_string('emails/email_change.txt', context) from_address = microsite.get_value( 'email_from_address', settings.DEFAULT_FROM_EMAIL ) try: mail.send_mail(subject, message, from_address, [pec.new_email]) except Exception: # pylint: disable=broad-except log.error(u'Unable to send email activation link to user from "%s"', from_address, exc_info=True) raise ValueError(_('Unable to send email activation link. Please try again later.')) # When the email address change is complete, a "edx.user.settings.changed" event will be emitted. # But because changing the email address is multi-step, we also emit an event here so that we can # track where the request was initiated. tracker.emit( SETTING_CHANGE_INITIATED, { "setting": "email", "old": context['old_email'], "new": context['new_email'], "user_id": user.id, } ) @ensure_csrf_cookie @transaction.commit_manually def confirm_email_change(request, key): # pylint: disable=unused-argument """ User requested a new e-mail. This is called when the activation link is clicked. We confirm with the old e-mail, and update """ try: try: pec = PendingEmailChange.objects.get(activation_key=key) except PendingEmailChange.DoesNotExist: response = render_to_response("invalid_email_key.html", {}) transaction.rollback() return response user = pec.user address_context = { 'old_email': user.email, 'new_email': pec.new_email } if len(User.objects.filter(email=pec.new_email)) != 0: response = render_to_response("email_exists.html", {}) transaction.rollback() return response subject = render_to_string('emails/email_change_subject.txt', address_context) subject = ''.join(subject.splitlines()) message = render_to_string('emails/confirm_email_change.txt', address_context) u_prof = UserProfile.objects.get(user=user) meta = u_prof.get_meta() if 'old_emails' not in meta: meta['old_emails'] = [] meta['old_emails'].append([user.email, datetime.datetime.now(UTC).isoformat()]) u_prof.set_meta(meta) u_prof.save() # Send it to the old email... try: user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL) except Exception: # pylint: disable=broad-except log.warning('Unable to send confirmation email to old address', exc_info=True) response = render_to_response("email_change_failed.html", {'email': user.email}) transaction.rollback() return response user.email = pec.new_email user.save() pec.delete() # And send it to the new email... 
        try:
            user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL)
        except Exception:  # pylint: disable=broad-except
            log.warning('Unable to send confirmation email to new address', exc_info=True)
            response = render_to_response("email_change_failed.html", {'email': pec.new_email})
            transaction.rollback()
            return response

        response = render_to_response("email_change_successful.html", address_context)
        transaction.commit()
        return response
    except Exception:  # pylint: disable=broad-except
        # If we get an unexpected exception, be sure to rollback the transaction
        transaction.rollback()
        raise


@require_POST
@login_required
@ensure_csrf_cookie
def change_email_settings(request):
    """Modify logged-in user's setting for receiving emails from a course."""
    user = request.user

    course_id = request.POST.get("course_id")
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    receive_emails = request.POST.get("receive_emails")
    if receive_emails:
        optout_object = Optout.objects.filter(user=user, course_id=course_key)
        if optout_object:
            optout_object.delete()
        log.info(
            u"User %s (%s) opted in to receive emails from course %s",
            user.username,
            user.email,
            course_id
        )
        track.views.server_track(
            request,
            "change-email-settings",
            {"receive_emails": "yes", "course": course_id},
            page='dashboard'
        )
    else:
        Optout.objects.get_or_create(user=user, course_id=course_key)
        log.info(
            u"User %s (%s) opted out of receiving emails from course %s",
            user.username,
            user.email,
            course_id
        )
        track.views.server_track(
            request,
            "change-email-settings",
            {"receive_emails": "no", "course": course_id},
            page='dashboard'
        )

    return JsonResponse({"success": True})
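# ----------------------------------------------------------------------------------------------
# Editor-added illustrative sketch (not part of the original module): the e-mail change flow
# above is three steps -- validate the new address, record a PendingEmailChange and send the
# activation message, then confirm via the e-mailed key (confirm_email_change). A minimal,
# hedged sketch with a hypothetical address; never called from this module.
def _example_email_change_flow(user):
    """Hedged sketch: kick off an e-mail change for `user` and report any validation error."""
    new_email = 'jane.new@example.com'  # hypothetical
    try:
        validate_new_email(user, new_email)
        do_email_change_request(user, new_email)  # records PendingEmailChange and e-mails the key
    except ValueError as err:
        # Both helpers raise ValueError with a user-facing message on any problem.
        return unicode(err)
    # The user later follows the e-mailed link, which routes to confirm_email_change(request, key).
    return None
# ----------------------------------------------------------------------------------------------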
jamesblunt/edx-platform
common/djangoapps/student/views.py
Python
agpl-3.0
91,324
[ "VisIt" ]
bf256e297c3204fb369120563101bc0ffecc32583c5ddddcd0125a95a2123d21